[Binary artifact: tar archive of var/home/core/zuul-output/ (owner: core) containing logs/kubelet.log.gz, a gzip-compressed kubelet log from a Zuul CI job. The compressed payload is not recoverable as text.]
B^XA Kα&B }cIB暙*r{!"0먨kBoM;k( R XI QjJR*na3vԭus;^.bWQzqNFYTsFҺC'6_CrDsZ} qk\U׌|=UVn5HZᳬFIș0<({xfJJt%ִ9mspԜxCkqny9uG@ڤqR.ֻeQī?Ny!ߋ:9 f!uI_JJc\)0R¾re4GP `fM ["1&qIT"fliϗUk)]u] d5*S|:ץ:u>B2Cc/澟tkIj4 ZhN͝Mxwlǚ`-_=B)9i\S'-4OHi:˶zIq^6_jjb}v#ۈJFbM:YDXҹ GBOCC5haCk?ROs^:ƾQOdR?Ic淓Ư{]# ˩y3sc}3SpIklUxuBMTqcVh#cΠ#c\$fD* H}:%9u=u~)*#1Y`GT*T&DSB%\jAf IFgL<-1ma$zxHY1G0ɩ4"GgbJCb}hKDskIPiB8 FʂI`$$htCc)e1TjnE^!Cs?+<06S ZsAhe2 ;FTx +ƛEގЈZ%Sl=㛉-?XÂlzδ]ng(ɘؽ.̛;.r)9p`0⢦ԙdyR$x#:A)K[E"w3W@1(B1{0҇`.ű+LNj 2+OPOfOcUJ?L~yt@Zb%0IqD&-,ӶPs,(mK}TS G/ٔ@:4/7.?]Oߒ۽Œ]Q: ]Q P.[bETbG[b T9 N Qiv\E%Fp[T43Do3„LGT] hhZ]we)6()ZUvg$3dU5`\g02h2Zh33_#xJ5OϚ&cwǎxlCDOQ]% J%fK MaG:GKGGS矓+S_=jYovC;&tB{\34sLn xFY $ƫH%ԳiVNMRGN׋j=+Zj'vtHei\Xr|:,ofEzw|Ku:ͭ5r|&{q {r?ǭq(@&ҩ3Ҁ~y*Gm)T9L%Ebx} AXd1y(OzFbf?{3ͼ=Ȍ{K;Q5 ` h:a+n:L6ĄMD2Ȓ"1n1bLbh 9r'\,\-NΒCg2:>U"1M&(dmԬqd#磥_Y1?z&,ZEY!zhɋZ g]Op~i_".+"$Kd5F~EӯE{ˋNFZ}1mիɧw0ُ}s{?9멻 x2DŽշ_ v _d޽tȏk0\̼ܛы? |,C/V5j~67S)'|1,+0 /0bZِ ""sVR@'ϱa%f v?ۼκ!ƌRZ⽇N7!Lo >l:Klgr{iS]+. Noo/'=E|EmtC>[PSܜdʂ$\AHo>|{vp\gWW3+kv$j |jOr?1)׵{'sbӫ$$j(hl>zûoܽ0Kyk0r*nSvLvP_pra~sHrH]|pz& d34>ҿjf8oIfZZɃXXd<1ht-kf6OjиcÞSsI5g(+f~ n v:$]0*f/R ϸNca9F&]8_* U )s@JzAꨊK Ri[؋T6=2쥫^0qbx1-5gFjƨ5A[[1 l 1鴦ȴ$|ï#4²KB5`!N"4.X_P_\9"HvJTllw LiuӠ\ϳd{xq׳ `TbgI5'1±" ͬ\:o4&Ov8(B8[kxT@}Jt6(9-?}kĺt$R'^1z& Y"`{iGbY Hl5VS1JrBإ8'ၰBcd5xٽx$E}y{:'~lbBu_S,1|r9Q ;{GWz7Hj|G2b8~P  - V%D0 S'qBGZ 6a]V;}-A9m).o=A"#,EVMJIU/vq~7De љ"lHT"!61KۧlЙ|@.Gr?tOw/=|Wor߹3]7 ZN ^FA. %aYDF>'lqBl2?I$yy_eKzC^L!QHW(9BX4Z`ݷ)"W}&,<{w`"?8skvxy#Z`**L"b1.((c]ijg`#HI[wt:quʵ/g8 q4 lm16\=JZ)`F6M;ɕX~[ ߒkMI^i5(l:t՘ LvM﷙8"w5]MWZAp BjpJo: pEKܲ+eQ[VQkԹe2_.߹ee&@& MKF*$ R"QZ)Cׯ>r {-O8I@.,_nήDsY] NgK,e N&Ss sl(Ln߀t]O^|z3wxH!&QׇH(h+/("zethF9Acψ%2H[fSrq*Dc!0 Sj3e@H> |UF4#eBITs䬓bF ja=RGBo3I xJ*%2# Mv;:Δ&9MysL(# $_2ٙ9u͜4i̩mkPTM!]j\SBD;MAXHE(53v5 Fa( L8$꘧S*%NQ 5K& Ɖ$Vt5.{ 0$Q#;8\7\q+@VxWxـyoalem`{&IBP9Dyd]dJ&fE( ~Ǐw#06.$G(~̟yҿtYc::] P,k'f^y0Kݒ=ؗ7z'rX_⃀FyQ|r\ʒ];|ttv_ߝyA"P2jUiFIt~:{Gn4lTG?'`vuui^|g3=g.|?3P;pLw0CgxXnF0 aǩrJMkK8*E`Jhz4m=qs߫\:s$ޚb/o S7:uLe/Uŝb BD5-+$Eޘq+ck$+ZcPlSƆ;Q"ANjɛ\U}7s9`B2*4@zЦԦA nS WQ ٙ߆ uIUtQ nZE:!2u;5%IN!㹳˙g˷a Q>y)vׯ֝)?v\_B}{wU#ZO?{ҽ&>m<=x4Zf-#wD$0OD" DƳUh"ȞVL**J^A t*џ8]C#_xg3/y3y/V_dǽ`귰y3&lADh-}E]ߥc" Kb1$z%I58mT@P,+Σ*p0*iQx$g naL tV foBD (GRolE -Y7*Y^]ehK*+o̾[`BVj$"l{l9>m_EӠ,̱M\χ NEH,x]^_Efzb1 VT˺u D&ٶo? o‘UR l^3 [UE?ue#!CmF#EQ:4$#>ˬ?@2u[d?qqX/8/x7,x?ǻabLb\f41`X٢.V :O\$؁Cp%Z]æf"^ l^u&7&QFr lɮlpu.8oV<}%'}^ؒ}r3>O2>( ҼmeQ+0@a&1,7^Ur *5;ʤVȥU;GTaPH&|#\И:pKJ%8"֔RpV$'@Xad#%yhT i$:h7%hcE V"GTI}:>" 0wgV˃ J:$CQ$:ʬbRJ h,EԤָD4O 1eҥB$#j`1EKjS~->OfO)&"".Ie5Cwdq ##&q8GB *Q?Ym6/f//ܚ\}$:N[ ϭCM_L8J\5O߯8צI&c10.g1yr=H40Hڋ񜩝i`)YQ'$,RlN('.5![Vr:Vav2:݋Ih0aAG׵W +GZ\ҪयApEKFeI ,Ӗs~]4x8XS-T[G y9 XOpVV0WC٬[# ~0N'}j-/Y:MG RJrQ&{?@̘nv<_v1y-$X36b p:bnUABoP0"vMZ ' d2Zg[ˋï_~KY] 7*:(VR4xLUC4E' tÞat)?h3䙌jb晔_KB*.yٓ[Lƺlv2-ǃZ%=ց')!CGtaiIzNƶ_h+BJF ' Kq_Ӥ4jW#"Lḃ:8{\\/]ao&_>@-"h2 < ,aŸ&Dy%)\[C!hIo w^*q)}P/%mi\a|۱{3r&9Ml#TQ8~_gh*!D5jrH3#-iIT!L&4׳B Pg$~w5hslh~2E+т+O=lTKLX;@$z\ dRҪaZ6쉩"eAS+ܞRx{vWϾ%:CLj\"54k i4;ku("X Uپܷb?፫><ۨђԀ?:~_V3d~_اW|:ճX{Fto? 
mfb*k/|tM[{ OͻjQS7Nͽ]RńPtc VkHށfP]k۶+r7{4x?j*xR8&5 V:k^ԘyiG{l >фaȮٹ5N1 `c|N $[|BP<1)Cm>MqA@0"v͟ZIY_4\]ϵ*!r8=<XQcVmB,7D%iӱA%)r3 85csK9// t#`gj7hspʁ,Oa PM $99t>L0 (@W2wrDZ>zg玮CL_sAHތC?gShϖO K&.%:9h A]O2ha;w.9a i eB+C1K翴 4+I D ~G i(J}pxH ,2^A~_>>|H<8I%C+"9CWtZ"DS8keDm״$o CReMEmE2:bE9-`YguV DG}x9(O˹^w$Oruy5*ߜ8翥9Vyhg, H=D6T?2z̖@Di<ϳK`!E'>]ȖVK',L {*q}l@3ⱜpd~닥&N{T> ǫ'!v{="eU e +ݍf܍fVKD D[kQ׾2uژ17q&6Q޽]`,R迸S4E-Z_2)9,x>jZuoM|E)o|p}柟rrI^,Ptܮ0*f Q% Sgx ǨT+ C_@WP;O)!!BV.x8rtku#$ F=Z]7F5|4CoS}A|:{+ ?ܭusiiZn˭n4|Z]NAyw<.r;~1ܴr/`2sSc xs[f}=1o2U 0O7A6Z,W%QX) u?;wr>c4άğx6rؐs mL4@pEy2zf)'w *.)A+|«oӯo9%]_qmy#H]o+iw7762Qf濾fՊvFW+;L Rk*-.5 @b ZlTB*R; ٺVETo$Qw 1r~.H0 JTihZ2DTK‰y?Zo%ɕ[pv0齰4:9*JnHirL`IMU%0QJo:<{j4l9X8Z ɀ@"#b%N%xl|Se$Hb`R!<qdeDe8F 8< H.] *J*WjDm'ɳ{弌@sۡ?12ājQNQQ >魇Â!1RTKFQIXҐ0Ri8"((d2]"t@(zk"5غ4!tYG\}'i5j{1٭",YaB7*{[gsLXg:k>RE?^$ebBٝl-^Uyܮ1kL[Q ¸8_LMcjM=`oaϞ0RsNpFѾ0/Ƨ (o̼BS^VrJ>n-YI珷Ks!x "zJMlC>:d`$hI(yZ 1%zJ5H}NIqEsE&wާ \.GwӉx*Vٳ\0e5r "{8$[cq53Y I4{YzMnZ0 ~LpV" i̦ôy=09D}js7r05%-}37xTJ&+_ޞ a%Xbq&ٴw!&_ӝx$D/F<_FjiduAJ\uCC쐔ỵ,qX}30˜C]t f/Rv^jΚ́0P5k2(/OYUŲF6!µr3mBJ|5șaqzPnra`I(a =TN2'a ~ %V~[ TC*Q[ɠ(CN\2GC.$;3&#7l9twĿ@+K%B_z_nFwm^AV"[AI4rͯq h6^_[[[[nċLm=' 8Qg4"e4aK!`aDԍDȘL1,E2E$F>U( 1ʹ+5/ IQj۝(Uμϣ}0J_![ O0wsM\&={n޷X~"[ssX! 4c!mxtr$ _YPO(lؚwi< Yh=,F9꾊 Ezh}:xDXQ)k"n|b~|zlܴ<6hs:s'1&n:[*M4Z@@ɉ"xn4lM5CPu`>J]X]a1jufX!I;G}c\p藾o&ը;|XoEC4Qt<~\"kWMkSVSܫ+pbPƒST^В˦@@۶˽/]zjINN-BGr2&琐ͩ)ʿjQB|Fz5*݉Z82ex-|!.Qr&c7m_ڴzp`cש]ȯFM]A;o=m ۉ /ɻ''ʾG}U~}X_aNeg\,]'gP'-dC}Y}{}e/Hb&'W8jEj`޶s8[TvN0մ$zvzi%?h>W(?},srɄ a( ÁL4 $)t39\?a`][oF+^Y}C`;A 3Ft$9 TP7n^"+"fW]U]UuAsoNrOࡈ(j ن)^ ZF&Tƃ N$RCvG8pCcH1:QQ X]BR1A i F7DP%99@`d!hOGRDqY,j\iW< p@R)ACpI0SɎ瀤EiKBH7â4A| S-BĈjP11JhZo&i]~u$R6BLBL+;Fs)D8O9(*ޙKYw"Or;D·29QV8j:>FƮ^ `&ݡ;dc'+X]hY;#}{$}D}6тvp&Eӄ*dcn/1RsTU' f?V 0FYE,AٳJÚXUЅ73φn]fm֭`R!P5j ܵj^|emvq%9ûNʍp?'afLf?q/C4w/\"5`!`I(WcNf qohL!LCbatD7_sm~s}l bu4R +6bA*M]dL׾|Fsm.huf׶0;66!mlSX9l͵e)9ҏyzR$50N;j5t hq$hSDrJ5gڛR$1 ODؾqvޠ cMZH|;KqgI#Zp$) &V>U'}nj+t|oj+I|&2C5NYaKBGq^W1,8Ys _Yw&n|5a:2M+]?MgVNPgXΑjPp|ϐ27HMT; 畭d4枑9-H`HBEo4~^ d{VDajҋ窦`$%b S)0]zR@QZ>-!hygw zE$B|⼜:TBg.0æˍyJ0ƞ>¤`k6Vwyw_ޙfcwJ-kl7c85raZ ]lYdB;%tuꤊNWpZۻ̸S:1BɮZfWHCfLώ˚/^:?0 D apdA2^c<5&#Nz׽,ߣ(=ƒ3~ m.mq2>4wNjLZc4_}lXȦXa{5E&M.d4Ơ3ғSɼ,??EI R#qxk. oaݏ~HgkY;1- uaIp.xWO1ʘ` 1M7.eJ*@Lz_e'/R]=3$`H Z~٦E^ Jv5&ӱ>qP03 |Px1R_:_ (9xa.&wo&w_L-c n,ARq`k%)<q(+RaDXATEI^BC͇ +43jzt[>ؖb4A,T1ٞy-..l(ж3U7m6U{muMê "Qt"a%Q*HJiBD-UbH)ј߉+/|Yd|WvaU( @2aoTr)c\,.NT\0K ) <&)q(L 0PX@HXQ(8f FN"Bmf -J6lXbpEh"UDiTbiGq*H(CX9wc1'YjJX,]y("Mo2er? s~> 2Ь0ݽ%/1(|ļT~Gnf#nGXë>MDAqlXN 8w !|2Su݇x13 fo_\7/@5!L{2 A㾳}暍+a= >EN4gkyȗ1Y 䂔>IiubRN[y9,9לlSwβ#LffEd<΍ϫ7hezB#0&/7q[ &Z|R 5@ $iy5^Ct߈r$k"n|RVvjA7nj|:nf9ݞL4Xr56p"1BsU$o.N\)9ЁIgf?p;0#IG~.;ձ}:WՆRԲR{|U&#O+~&hs|8igl:US4^y_n|ęo " Uv"̇V %SZ ׬QznL ]OcV`wZz,݅Z+sh^6|%vvI88s X?M'Yz cs/D0=Ctb{I+$!:gvDg 0m凹s?X5Z.dT8|@kĂBDc7T7v;W^9bNoRߝŢ['Zo*>+1<.4NDS%Tr0U*HwT{RXvaǹɱӵ/'=¦6s)ٗNu`z9/ Ov].ϥMU[x5-dh^uA|'S(Ϳ]쐲xß^~-`[t5||A&N̙c@Abk(tSdN69$^NGfjD5E(JH8xd V m@psBPѣ8ٛd*.8F7*?H: \ %fI!][o#7+^ m~1 23/Ȓ A)duH#+3jɪX*U@oLQ%2:n)}Zr޼>FOH.oe({P[TPY˖TG= irnRe+]0p/~A* Iۉ`!eRFI dWGapus .C 0*rZէĻ'P'XvfD1.$zaPda#A( $,F*I. ]Zc]9jt&A^ Ĥ!Bv\#{Z :TUVHa˱5: ͏TΏ>]FjX.6XY2[ث0ZpͦjMstG֘7kwx( Q;[^塤*dm|d"'y= @cy/mWpz72ڟ['f$]゗v2mn84->9@_wp-+aC)mfAv.QlXfj;#x ꆐ1+tuA[w Z堆'hCx>N~IRAr[T{WSJ+ڳ/lM ͙ëv'ퟗ SԭBU+(Ʊ>®ǚ\Q_ <0 UDƲ ~*XWB2w P’u߯6n8$]׸(y..z~_g.:a-Nitq*>jս9K;ЍpNX?Eח.rQD?u/:0S'r8`O*^.lqu;x]3Y:]&$]72)v >J%C ocLPLWgB$_5 2!).۽M%i]$Q4*ʃbg4 Pǭ[~h0^3Xq=nSDqQ;<قJv\aHaٞ<ֲ\n}FZRӋ5%:inG-yhF] ͥqJK _?#3>Npjd8~SṜ,9ZV9!iN9nV1W߾j+W|v!i5V̑ !9i| w., N$I;=p H03!_|d5Ϳ c%*C9t 5n쉢TuC_;))iB7 E %S<}?j]e肊(T/~ocEUX@ZBo?^2! 
ɳVx__7vG÷I <3n`\<+JL$PXbBS JQD,}I3e2G ƈQES}rUͺ\Sʨ9QmJ$+N= T!]\Ԩ=(1{lWfߕϞEG)(TJS2h5A( M0$4%\jxb"[T*ByTݔl_8G0)՘ D3I4B"CDieSo:kB(1~8+NoIl~O ޿Ofjtx>LzqnQAn1M⃪},Fn1/OY%+920_V#{Hl_|8]]a j.OG 7iyA0YWBzd4/A9[_=EL1qvl4U{]3bERЗ\clVg2iKǏ{󏙿7WgMīT OA&azA>~0's2|2zP QjD,N)Uyl^DhAe"5 Ccy_sLbnTYU覷\b[И{T c=IOuM:~%wq+D ԥ sU 0)m"wF-J:4fuڇݲg05zPCB^V)pY}K !*[YZ1ʒuE’z\;t6$C$ˆ% K\ H h'Ja.0RILD$ 3ӈaNplj˽<Ec0~4Rl,-˻+&z13tW a⫝̸PDP2M2JH$H(Σ("nj%R$WE*q Kq ;擇aC> ?Qd"o#cev'{|q櫑I ,do%#|=?̊WGa?M ""@8(a:`A);w,ij񒣿G#hool."f\{r V I"(lNiU7 ةjPb) UPqJ F)"xW9 Bbw B,CP̬۸Q4RbǃxX-*0 Md&x:y,~6Z`ٰg 3>ms8eò9u N+ow%mްxqRvW E}^}8ąڽ߃c!W_f^n]Q{TxسJ ( bquΐ eAlG'Xt3h{o+Wl=>t{0Vش-\_WxWp㖢R3g Pt˳]*yDY9e-K]Ӻd,rPq;,И3m5V`wQȘ4_>1ʅ8ۦ\dsb\*]gȍLstco*%cP2 > ēCCxu,}~ '7*<0J`>OAfLMi j@xbjk # Rl_.̵ jJT kT 9R  7Feo euuO[ZXJ:y*V1R^X~ l7%9bY ׵S"EB( B 8" ״A6Քռ> `d8*`lY_yj|ȃ+wpFXnuf5\j>Wж9qTࠎ\h]S~I_([_j|Ch~qZH-ڍA <>0b եDE)X`F5FR;ە  6;F%_[%~+bCu,[):rNHKu,åNu,:_!piraՐp3C)WgSewL`OeWuL Ož TK|z;rc]:B|H.\,Qɹ(ٺ!ҼyPh*9u l)?4 re.dx]7~l/:sU=2ziO]7sHJ7N˩͞ #~ɳ&ŘjO mQu:Q ߿VjhrR-JUWrItRd vlј)-UQ#w~]w܋q{N|fmwP>^![i>dg/X5n 6yM//S3-4lt!Vܷ rũVRáFp(n|ً֭u϶GjkBi a8Q@)vdZl6OP;VmDz+I|gFlQbLvj7dqM5O+yrBA@INddVv Z o!\.jǬebou[Qh=v ɧw(K(Ir:1VZyQ QS]bZ|y `m#).Ѫ$RsچGR6K>ظ"0(b$:}6aSgW/,ZbK`1B;6CFA9. D7 |\~SyY 1)$*RJT>90"1ǂbLp~.<N<&e D#4JGkGHEq& +JTV( y .Jo&KP3m43є{gƑ""%dQ&b` }8D *Iʤ mԜn$oֹ^aԔv dJ9\7@s,-y4C)5Z1H&QF۰& k"FF$qJ2&Yɬ+l='0OG|.>BbdgٺVy#0XE[\*ëfYnV L)?Mn ӨhM |zMA|[0?-k~gdڛA ${O?RfH/a?K1L+L^-Ei3iѨ\wQ $o.qN*&B sA67ul0J9q1B(^Z$}9tű/Ԗ$,%Ҽ}si -s"eB'$NP`%HJ0YJ(0EaDb?FҘ8< 0_DRLEBL  4Ŝ"ĉHKCC &"iHV:M"aVA\P- اrzQM"oM5m [B`3`JiQb; 2E09}JΔ >,v7fD=z3Lx:y,~62l3TVY(M9^k[墿^]xP]/x7T'^ A2ZjYye2®̳bЅJ$@`SP/&))呁 O븿Uo0N$o\l17_L|f\i#n2o?ycOn1WXc?7 oᰗǓf"(M(3X?gA4NVa*{[1 e,È)9Y϶$̹#D@X1I C@bL h6 a 7t>a-yXlQńX}䏽 ̆ _<Cj ,"d6[C#RJbM1WE iBLwzV&1?"0*X$}gQLM,$ %$iE:5njq($$Qh"Z o1X@8"WNRiMď:2ie; ~fzVo<}ZL͜v|mvw:b E+}`{q{ |A@S6 N_^6ΓAnΐcxٰx>ͱz VxK:~ՋלWaxr\A/X]{j.|;>]}y iUÌIMO0gḾ>FX&ۡJbVS Щ$ȧs7Ytk=]z FUQׯ_oX!pY.1 |n[Yjkڝ{-ZN;H+:BJ.ԭ knNV>RD%2Vmp<3ƵѥM] *qYzS\xEN57qs*W^u2G 8*Gy'O^!V-"RT`UWyeϺVxeB,ݪ㕏Zxc5s|rKc/J%4*xqI'P6iu H7s2HKJ panעʊ럵YRD. IF2&*L '*qIX bEF Xᄁ_Jx= !WkZ[ҝYkDNrO2nۭAxzl 6.mضy4o ft?wfzLB `Vesayd;F>sCН)YfMf$jD~ƱLyLֶ6/j069E3;+򞉵\zrQ2zC*@r$!_V)`]cpq3RKcN^F>W.Owj KN\6N_NL>𣺹g-FOS`'|(tr<}QPsdt`t/)ܸOrŲq.g)ڜ쳷|BXӛ`?&^<-|XQ2a&'"Qۭ(D`"1Hq)SP Jˣn#tfiw1I`82!0LaӄPTAH"'a$$b,J1eTseST 8qN2)`$Lš GZrEA[K(  H!FhȈB#!4f8s㜑3pN"/UXH h4JE9AT<cpc%|F!v'iB1'e@JMJw H)p'6D+f^P`*'N*+;+:nOƬ{Ij 8K\ 9sDOGHH"RXЀhB&= TG,J *Pil '0Xh _@r9ܔuYקhAvDO˴HX̾| 09<! S&&XX{<% ~;v5`[n~D9$6<4 r=p7w,B5_ kxH]mg-}Cg=UP9q̠/a-6s}cS2?CC7KtLɅtO7i+rKD m :)lBQTo?LpU\#OL`H;9+sHD|ŕ&+=#PpWV&+@7dTsi8 (6K@G41A)VpV t2ed-+ZvL* n;OOoVxkVpW&XTdMW"HL1ȕA`3ctO㞣}5B׎WK0T%Hoz'8fJdGnA1UXm) "՛qz)r)AYظ`lӛ[;.`tMOhP9O(V1ݍ]>T~;2B j*%uK攟RQʛKɩNɟnh>%@]w=^%EXHw> F wHUuET␥:dj [Mhq2GB:4JcX`'} ֔('TIѐb+P׋/37cf;ng QV Zbd3i;m_/h&$+RpEz 2 K8oGj|Ze"I#~GQ^T>J&y#.VIP,ewzBLv{rye -q gMEɋE^]mn(Th- 7 a;WcA҄ڟvH}nfxʚ_{ubp\Aă$ ol!) d0ĵ.sWNZLͯE hH’&*R!D!EQģ F+JT|ѺEx-9jWm;XVt_<.B)JwB)Bd~j0$D"`$& IbDFR$A ! !$UqXE)FXȄR2NByi`` BVjF"L"Ƣ5N+ٗxԊKh'iǯƏxTOr,ؿ{Պ~vW!,|؜d7.o&'AHJHr@q$D'q B1ad[wZo+)˄u[qGw-YUjhӅ{)j’a-6"RmH6[j8hj4 1AYXf>,z.ej0ɵq g19RKUrv=oCcb50+i^gO\*G$W*.ux-qBʅeDQX4JX C,R(54UpA ai,0xcE%> R2^Ij1Y@0q7X`̢/">n~dlT6Ou~LdodeL` yae_& Lq "FME3WC0)C?Oi\}7M%O]i0>_'oÇj|D  CLW U4?uf%j$I<͗S: /u\ ر.#/^6M>3Hϰ%h :4M>eሁXđ)FHO~f?' 
3lX(*U5ERN#?: B{-dEa^ϓի8G5X>aԇ[`9s0uAISw..qlF+^/kU{yo\ NkFC43 BC_4K17v6Ex2%h쨽xBTh}ټ΁(хK_tjYZТq~]!K);X,pU&P#$\ 6Noh)3+uᰫ0^ yJ.j.V]j)t 5r}<` 7ۂܕRܖH4עĆeـΌtz%Arm9x8,k |eRSP_nl/[J7חEjݗ-qOOQ_6n H8^.Ebu"T0t8ᣐR>Ab!rTgW` 䪇?U9$t/+>5g7Zڻ@ =3׈ tlׇ֨-|EiH=-JpiQ|hR.+(v[ap_2xktcY#f(|f%*L׌awQ0W"PmVv=ZMՉŠ1t0vX@ J\΄U*I^jSX50$RK].KrUۣtw#|.qمr/E4HwVT 087zR ܝfeT{f)kۄY=Dl)l4m-T˜Ø[DØˡ%i:tVƱ;A+rosۛBwM3ɐjP'͐*,hmx"&[ޕ$Bew0vއ= l1=xzeB6ݒ(۽ ,(UY%mʌ/Ȍ0&Wi $ 2UӜF^ F}C̐VS A^)[Ӏ@Eѓ/Ey:$as[2*z wB'Rhdd?AOi2PY۱h%F"G,\ȑޞ1$1%%+%B'| ?O=%z Mu)p0Wu}2dDlr{cm+.5&ɽNO'#ɰCB$tQZXbx8(WСSs*]z9# E,ϦXRos>fAu݊#O8:;$CX2s!1G=X;0$F='7E)hLb,&ݖ1J3M՛=4ezSzf $wشfR;ޤRwhV-giDwg n$ڳ9x4OcAUg 1g6lo oe{jsZ˓OYVL圼{trjY r Vlqz)W=[mJbYg0bG<n9d ̱MPs(cPdvt oO9n3^.ϟ=C*J tD  D`J 1!Š5*:)jvM=";myFJ`h@&JEAͭ`V3PЎ+0'-ab8 Dbb9EEyA$Duc#t$>8Ġ4rXg0QQ$ILs&9?|I("A&[dcs~`UdjN5 Zj[5 I@G+P.Z!-84(AX9"X[IڒH}wa5^~6k:K-RljHN cA(SS] P- c`|qF[UFQ^y=1Qk*2 ʒ݁kxyGJrwH)ۇ t &߯~<럞_ jWyVl?G)d_l >yiV>)uU"<θ;q h \ :'{VvP B lA#bQH3)bDƑPcY/WNKsMR@)^llӜ90fht`ƂB{0*!^@gAJ($rTN) jh; tMsԲ=)0Dm56܄~5m\q!B XHVj@ B9l1¥n#c%߬~? Cy]MKƺ!&8l*kLZ gkhR8_ hz1^ڣ0se 8 plf.Ad8g@{ "C6ЫZ^]Rrro-Hf8%< H$48]wOh?58.ϑ1=1L1f ezPIMΫ1՟I|!֗To\ yh90RX ElXp;lKZ2svv6Weo?k`#J:8) 1.16` y)!ɇaLYߥsprR!ި4X-NkjVM~1>e6SLk{[qb1&r"jn\.?u}]~>}XiI O.D*)5g17ɹR?W$E{ k5b@󂤃^ N{(A5Gj8r05%{/'D^cr? 5 QD`Q3l&|7QQ;jQQ%QX(DdSڙʹ˽kg*{5=^$3;Sd3!cslvy 9W؜:/eyҿS=,Lcsk b8I1ۏsTǞ1@ïg,=1e*6)] dhf?4=+|(}1!dLl S>';nb=jJ]/UH>^zxRt#%P|ضDM6z;;I$(ֽ831kD4šj[עzx &IZ 'fʫy-)-.Lv:i Đ\'rDO.wOc jIpm-Ӄ41D1QCJbѺYUonk%oG5Qܨ^tGŻUxlBwզVVQ߭| k>u Xʨ ssfa9P~9ˏ#:mW`ƴSy+fà21n64O yme #2mzd s2h]̩P4υ JwAd5i9CFY>3dlb[y`zj)GZfhɹ@~5,T@Y0Շ =Βnq 5d"QseG/IR ѳfla],ozgc/յ缡xv2a{ba)s7NGn|@ aSCT5 1nu=i׍2u_Yojev 7H T/E<1Pp"QkXx%R_SJ]95;8B}E_2k1B*X(0Ogl<ƽ7!#L![U`@Ӂa\))z=| q4DJҸf%>P+]4gcnY{|3K Ye5%"NQ h!PFe6+La?F)e~5|اՄ>Ux~?]-uԿ'[J^oA\?=WrxMwjo??!'/o>?=pY"}yg9;6ٟ@4ru퉼ZV ƼS\ְ߹?޿w-\ĝoΏ d~Rb 'ro wSDZF!`R8bт @F"#CVsvhXnkF+ ;0:P׸n5oߴ 9{.KȬPZ &'Q{v9m!x+BZa3a@h, V@ZL4N].)% JAR  I:$+A\-wK0Sjn,>>29=2~d#F@c hOj)ZoQ֩6H TK!AK.n?2C5(˿<܈=_ NH-DzX=q SN%ן? @yTrWr6J>;ka DbbA \Q#HNqlG1 cbBP@ 31JО4Y(xK ՏT"L`5x%2YpLA 9Ô!$'HsENArCa0 x=G Hla0>1,c H NBuRn}Y[~awVӕӕߥJN My?eXP{^U0_B؞3r2_U;g),rq`G2Bk9|e'[Vgo*9gn.gqyq>[^_-RJp̎g{>3..,eLkny🣺[iz;ڕd13O=O/dD%F};S"A_ qjR+AnǍ斋h||5zPJǛrB8D0U;?ԯvvhDVʃ蔎D (eZ$շv+&4Wu'"LբQ#&ڍ!ƧJy1heöL%Lݺ@h4i݊3Y fžSҝsڂll\>w!n덑Z'!2jGPH0  x-$*b+pF"cZ֜)qG#9$J-(pVV 0 rFkMY[a_'gM(DTUʃ^:FcPI*Ng4n4>> Sm"T0zZ=_HSDA&exzl,N!NI՚\@ @ E4 "TBj y$1U)u:%jz+c kc"ta!(kD4> ǘag5ujk 4:Z2˿"Nvdg Aɗ mq4H_?iߓI?zxL/.Ynn^S+q<.ħ_~6fIy7{ltŸ7>]g Dbߔ%gou#\bSCry{PkzZHH*ԶNPeT=Rs\NƗh5''AͶEeiL [Ɍl@g޿W)2E$H ˜z"pS^TLx;?tBVεChHM銘]j7:>r_V}N xĀίv`4̤ppuqoiQwʟ;C;mp5_znekCh4B?|WXg Q;?KƔy~5I(HYi@0 bO>*.ŗB&Z92VN!{Fma  E-րJW'C+,զȇ I.qW`XיE_ ,F uЙZYxDl*y⟗OZZ1'!w{rN7R3Iפ8[Mr{#oRCʉ=G 6OA*VFJsocPgEv42JKf $EϛDtMg&0Z:3.ԫ)&񤘵bEe'4EO-ҭH'nO[ ;>E kPc!bnny1F})^lT?Nkb>0[ӡV@ْfc4qjm!t6L#{h~<(w]^'y[RԼqMq';R2}ܬ^ ה͚t#.i ;LI'`Pܼ?npNއnJ Ft[;8j2"\r*Ɏ2?a3:6y6$b|3̹mֹy|Њ5.V/^NnٮU˪a,Ȋ;3=iR2?‹Yz)p@iTb s?F%c*. JlnICYE…3*}]E/s \W *^wጕ@ NE#h!$.$Oĭ(}@Ib]Ej:Q*R7gkͫ]Q! 
^#[~n@oI֝Ph*}r_shc#S &qұ "ז;G-hT0$Q&!JFXr eYɢbm(AJ&KjGb;&И3Ʃbڒ,"ݭTO9^lFWi"HZSGiX#=D$5 E919hk:OYgka|HBS 7Q&sZ[e á|m=FLzM2ZJUR3|f M}\4`o.o`t2 ݫI'ݐcH~W+9^+74{O=C'^Raٖ3ח{jHZmpjty>SAwU>WϤb| W^tDNgphjq] .'AQ&&ǕTB>N k#n *@w咳&߭kqt9"`偺,X_bAO~^x>3.R)= k3Z,2d&۲v9u,]^X)#FAego-'t20ԎItł-X1GPBg2aZ๤蝂RBzI%YAyBEj 2gdpk0[$[y1}P y>>yHDqRj$mNZsQjʴa|dDUAF=&HB;+ :}QUЈ1XEE *b*mA@My$ H6%Qf}Rh1ڶ}feN>q R-wXHV2H`"9^1֎3YOQBSSVN P  #5 n bIۀ󠺫s7y^vgQ찌@ʬu"\.}Rȑ}]C(KIL71¥g:]t9Pgev8F0h&sۆPthBY]CNm g;e-Z8.&H(UKThk VTC@]Y5в{ɬnؽ:j [ҧk ˯YU)k)noZVӣ;*jX] AvߵJ1BPHY A]jf9)B,B@o?|C1;'pQ7'?kr U`t{5S;(csx4% B&|"A /'P8o\Hm%Iwg˒Ѡ~wvf6xZ'),P0< ~WM6DB|Q(55$ԝc 9I.KI\U_J; {kW\sW-cΓKL@FYU"g@ yERKkc޷~JqY p& &kG?zP79/5kI$\M('*:;WMV)w5T߫dQ on'{9܌2zws8?>vu;ә,]E?yep}mݎ),mbq w7a\?`|TY_`?=ZCUj!dF(Ԁ$U.5JEVTV=X htӝ D6m ہD5I$$d;T7.٪ @wm6a̗v44F#S&H+|W;`m@xR j!%ky#j'),8̪^E&V,R,f 6DGp&i`K:^/e)ϤA|IR Ԕ^aK&D{1wx+Hrѣ͛В.^qBq 9]*)~rfvpK~jY ܝZ?&VWzƏFoxO3~'+q9iCGY:Cv_"YКI*$JIw8OOOs`@ul>V +g+&14aul%ӭx/J2)xsAI #*M.Վ)%[ ~ l)R5NŌbUbSa^ 7}=chb{؉Ҩ,]4>rf MLBr#ɄOܫ#:a $IaJrn‰:NP%XoZBϧoT5k (hE3M)-+y}Ou"Yrέ΅SW@x%YȘ.HA/&F I儑HK? nцΔ|0u& ~G|)xCq/ 7KϏ" 5tے@3Z4ӿGl6: yMi`>|ItF@].OE?6tZNvvا!ys&SX7D5Ԯ>jk\]S:/sךĭ.pwg:MIBCoȺwfy_ \n4*8o>L꼷usQEL6TCJG%\|$*_E$k։E(dBvmqmq]0ؒC'X~r$ aBϭa^;5/§#KRt]"7ɝ4ME`]`-Y{W,Ώbzup7ڿbA\%!I$kjKJ]Sko]Œ(<"I &LE.6bD,*☈CNJ@lfnrp Q~>d20Ai3ɐn~\ /At^x^fMDWaazYšu~~eC_/JK{mH׃Qa%4)FugT63" Z(8]"L)PCLcwG\r;}}Hkt^ݶ\EbNml3>9r⺴QšɢjyzR tPYRq٥EK^}!Zz&(.{]1BhM]tq,oR*GOfDx}w/R:,7Of~37o|7붫/8ku_vL5'^0a~?D̷/`1Y#Lsx~ҡ/ҡ/K/4,JXͨrk)7cT:J4[nx$<K>yf>feؓB^Նn{> v8ceb[޻\QRU])?JN(gOӇWL,/n9emo2z>l/Zj` :HÔ0 Ur]dD)G1Q :58N{G棄 xRF(kVbeaظ80$iaIjQLiq>l)gLG%qf0A/~Xxճ LT?S00#Β8-Xߙh9\,#џ%hKtrx/ȗ z|^I,rKB@οJ\籷o]dz:^ڝ(j͗~XxqigO`qGD!`$IIaקU}4_xa,BӘh twWcQ>F!A{,Z0Mj6/0>kgO*5c}CN1Q-98ML}M&Cu2TFM%1;M?^)hHھacIZ,P?=Cp0hVJQu6wlxUwl8t++nݸHeTvѢS~ʝZpCvgܱ9=_ڧ3XݠŨ^%E͢EhCM0K8Ezޖ7C- gnuۢQp8 2x"Dd휷5 -5 ^6)"I}rcނcz@%X(8qJ>J#ñ2)!I)iI,{3R*4rpP^KA>ؠp(]UT`U[<0ܿYcKo8xQ:n}{I:77q9b^cphAyU|,RUR`g+B g[fk"L7[0 y!ַ*bNaֳbaOpΟ^OP.!b"/[CA.(E!D1*Be+2Qxwvd[UngEi9!G:3uB騲@ 0`cFqI|UϹj;y9kܚ S 6+tU đ fXmȞa(Mx?W^Ų7Z%_e`4-ށO?ۙ`Lm*]}w={&$Ti"%A%T`%:l1T&,A\V0[̍1-0ġ4%1E1+qLQd>Pd18xN?RT;0<U$ I[@U#lEB*:9P #%\HfmU 0VHHީ#jU|5w0ߟߞA28ٱ?͐8mh%:]JlE@HaY ~Ƀ2qCQՕ GA0.U;gmR^`_*lTȵ$k|qd:9l[3=zqdՍZbT3&=R& HkI;l,ˈ"zQP@vteUc:F/[yKW./י\9R e3he eַ< )$G2\*KAhOYSeښ۸_Qiwgi~IT2I*lpU(Yd{4ISMnn*KE*,8PƝB.:8Bb*"ؘbXc=TڼKs{ц W UݷWYzP˚ZU!Οt,[Yƕ?KMYҎQ9+'OqEw_%D J ^jGF\3o ķϫ϶zT [־vH*_\%"xխ' d=8(„f[C&B`£@ (6.TOKKl)bN1Sn%L0ȩ `|ؑ7ߢ+,_& ?&wV*RoI?'TQOzORan&:85 #%(X&>SN"ָ}T! 
%~Zt X$qQ%ZL9ԳoMѰ~u~meǀ`}r¬J$X1̸Pq% Z0%9OUG )ai<2!Uu8~z>A61G6^)TUaF؉0'p/e@zϼ¨1T#ouj+Fpx `JĐփRC`k,l7iPGZĝEZk RƀNß+!mNVt #lKʹ3Վ46$"RI쩌x|M|Fo2݄9=W !*F _kXVisAFuV )4G'GDqד*2΃'A[ (0E # pr^<˚Ճn13Dk 6GYjV>#SoPvN*+ߦh#=dFז9dIIAߢmZM?3@߅*ހfHfZ*Q}[?BXE[@AކfΩ|}): g:] jδl9%-\ҌۓIC_C!hekyw)n0R#N 9`j&$ivh4k:LLAE${Д˝N _)vwI5?hDw|&%0؉'sbBܝ n <5s`t0:>A+ e9YW1dvՐm(١~wY[DȴcC<2A2f81c#dG3N?R09 )e/SA 5^ q#Q!Z;##B3QNQ8SOkL^^IK%X%,o!{bg-OYM#% wO"JD "?3:(& Ъ/@@m!$!&Y_ rfA\݉pCkk$?sr~3/Y}%oiIXw>%%)1\sb;<\f||;ށa/0$DG' ҁUͺMFP=I׷0Њl  1a=CeGpLߴ5$A 3.1uZC#-dwlu{P$E4U7]6qTF7|4Ip918:;vŢFɍ,=׎K3ZAU _'aL:Iv'Heϖd 2^gs6C/@wY~vPHIϪgXXUR_pM[V|xsr@E9>\eܿ1leRN9H}S}o0[*|#B{k7-2GSmrc]$݉p9{~0Q,MҐ2 *vdN }{rh+f<탘~3br3&yO%j A:F)Cz \K..ml2 j7LAW/Q2R]СPjIbhs= ɵqzk&.y0d"}Ӕa(>V5Ӯ'VRSUK@ l'Z:h9l؞K |r}e[y4`w_<ݨyJa@r`1ݺjrCr%jE>>]݇?{af -3O2]8  0.^dѺMɪM3gUQ}nj>Tf=um*¸a_%^ :'k~ \KF)xvB{n9y?]{E$ncZC66%Ճ .Eeuӫ K!C̏UI4g_fp '^WYv”A:jZJJ,=hu᳈q]]o2b?ꀼ17 n.򯈬4O:^;hbսM ԤpMߓn+]qZqg= N헫dRN[<7[4Č4cdlhz.x殠CY :Nva{佣i;p{ Fo=^&w]C+4Ļqe~ 0HÜ[C39{5=N\ը?ުC$mks0qOw뎏$&^|t,ifvN"mΡ ҳ$v,m{S&%/\u*;Yr"iw%7a`nswy Xk>,yܠ9ڸ=[RΒF;f%CHcZ:K&4NulLgɉZwmLɳAiZli=KV Mx^[,]uMd#Y2ISMU;,k=Ny<;_tv(8=lE ")qf1d0L 'lNlFߐFȣSTBFG$=9ЙBЏ.?G[*F&P1:oeC˂RihHTs_w4Fqе.(nĈ-cR]#pˮ5ʕG$&"T(hk<Kl#'t2za^}m2% }=sJ'B9 <+r4nх蒢~AU3a<`K9Q`WX6ƐRǠ.GAsnK*wh){w`/5t(o<%\FlSbTC9=f T!fH$q~ ƨV,.A'z_]WSx{13Soo?/pF56SG b~/4|./45>Wn1` [?~ỿ7 ~wW/z7|z@A+oƌI0(wAbm173Icjd%uARbDT~no 0x߿n}ۗ۫[37d$H˳ץa!rRgxFgxg ,"ǸD3k9@IjɞTJPx]-;s?>w]I л\~4ᗯF@k6`~z]  +=5z;]HqFɋ2 T:kFU\8gh=Sc@57X9FAd=feʵEeI#J u\ 3*>.ĞhAT* @ూaS@[K% ihmG@W)CLCbj!ۿt'-Ar&p]䚰LIH $,RRKGi{'~PW!ɣ# &Â&</az +GFb_-/zxǰune^ckY͛>}z\ԕ0a U9 Ft`^LגK̥(MWik6a#8gUȃdui & hcULXŹc`Lap*%V&LDKpLMgn[*hau‚H^ cETV`–E,rULZ0>"#I*QUEB4Kqb$%EZ m\up#*AEʕADp0ULZ%aBX2&lEѪT)UT%~Z ׽f- wf˻qř?FOOv U $4;! r᧯O~%H*;ۯ_ ߮nt%AD4 .۷Ii5lo.Sk?S2@)3JEN[l"Bd4?0 ޓq,W}bIuW!$F/OD*q:Gllv,GC|7]W`B{.`2]~Y=wxq5beFE+3B_旇iW_ſф|Pw!«ژb,.xĆ#\?ϬJzVqQc|%~H "bu&;VJ&SFvX@iڮ[y«vBB^ɔdG Xn0} ڭMDt6pS!݊mݺ.E2%Gֵ[}[) Nm"SF޴[yMCօp-)PG_nBҠ蔮F.yg#Tu!!/\Ddjۢ=fֵ1p ڭMDt6pP9X޵v+FvBB^Ϫi7JEܭkR"TO#8ցp-ڀcYnJjR45hVT!ONZ^fM=v3k/"<^F)IY3kAˬgZ;N.&(ϬNw'YT3k}fӝ k}fϬupr5!3}fϬ=tC#v?^X'*h5PI '?yQR:j )mHP|$ZŐdiMYFDy$)HhelNF5%wHTU@ERi@PqqL 3B/" $fh"8Q]*tНD3FS|nvPFzdIh}By籕wxVn$&o8[9@GJ@(ti"hbm)NxjXCRD @J*q>d GiMY>3,,"5XܣLsHFo6Hx;-lm@l;Y6﷠hB>ۺXlt*ФJ?ymcm+ A>NRpj+*mz'01b6BG76Jm0@10- !A/$ʿ8۳(0L X\gE5"h9.z;E!hJ̦E+ipO'~|gf:YmeH $+;_/3~8lzb Tj{ic`ǽ?ޣ͍=ϓ_=Re/#s,jaUCP!= XjG={n_V|sh\~5T;m7[TfGQȈwq?4T Ϸ CϛKki7hCPjE\ ~j"MB]9bH J紡)18~Jw $DFXC]H֐lD}J}%xNwa9܈B* 10ǹ HR#  RF:ƍx@d,_􊔐@J~t1Q19oHD>b$z+9*  yrs&Qѻ=>gߠ  ~vv1s*/qZW ;7T+z7{1O1^^'lVo7%>=G9(y&k׻;2 r._$y\"f7]\W랲hH1"ō:beڬ><;7ZCgM#'+I[WGV1vC(!G GOIHUQB.^ōY WWpc\M:wPv։9p9/f]\U@NĵC0?9jA9(hQ27e+bkӆ[@tMY^i7vVWOYmQ%${t#a(1^lI.ВagN t!;#|+OKPw>iI.>ͼF{.pUL̿KH:}z* F;EYp A`RQ6YqC! k-"[v+Dl511|1K#*i|4.742Z#CbԡxI TrؘjpA!2[ :8ǜ. 
Β%8*O\[چ c:=2P Fr0G^A=ڥ4hp6Sfݞퟦ:.%2ekMcn4h":kn|Mmݺ.E2UpvSv+A^kn"Rr=Jmc y"L)!@Ȏ;4:M7lhHACadr/v=ac$ɅL@>%:$~n g $-N_GZTon^ 8,E Rʹ‚Wnh,wYw7f(E}T{aDmc @j̎AeC#$`u~S_Po;jql{qnv)]@}\yĄ{TH6Rbl_)|$&6_1wqۆ/5D.#!&^ڽ8~^.Lj}f#[ͫ"0%% 5HNN XCr*@%U-ѹsgeм WXMX:v/ p6Cv>( Ͻ*;bwzD7zct_S]흃I ?QsL|(MN#װQr"TA~ܖEO6jť`H(Gj*`udV㣐S +r[<V 0ϭR!Yڑ)bv.rx(C D<?f4H$xVTT2(3.:}qV8ݒ"n[IP7Qé }\1 T PAYav 큨$TIS@Cuq㺝)t1vZIج>}9YAی%ɟ-^@3Qr7՞vp!!~jݺ]6h$o{gf0:6҆(&l#~ϯjoܖEr0D+Nf*)u>0d[K4fKrĻ[nGZO+nߖr!raCF98T8d 8:`ՂBzsMe&kB!$eaI%e$`?e!RIDJ>N^BSBvueFkuvy}}V7%< ܬ8VY,J]kC*ggI{8+}n^בu8{%FÎ-迿RrxYĜroh$PsדְEtMuQ]S#Yo4C$?U?c!ZR%K ڳx1Uz"S ̓م[``=bw+U=sgbW1VbcaFѦV,{ۛzdDŽݭE-ؖK *,Fɾ-=!^wɾ}5jD)n`FVu 9[-b7_5|&ۊDIvy[kcnfKpo`m;p9U 'QN2a7hRT>8OSQ% \f {ÅhqdiZbJ"Y9}MΨ&l&DZ'aqpn?;P3td]/.33a+=2O0!tMtt+Z,E]Rؚ^؂cΣMB0 @0dg$@2iL9qG1VRg8vRɝ˸I:8+7*cNd"NY(o^ oZ0aT19y@ d\T{6:']pZ.7ɻ)q &ckRS1uy7Ɂ h;k$s)-9oN(!$ _]xUDLJQEX3A4:(ЫwRLǜVlEI#՜d3bhɬC@H%zzekBO@{N/F#v)Li~Y(-'VQK_t +[wcBcML+:j Κ6bi ?31Wd.HqCx ٔEm{P 3k!)9Zp<wGY-g&e cQ]J\䓩]>77Lg 5;S#׷QB{sϭA`o^ao\\w?]\Oqkcze͞}_frqQV Fg@o^\YF{0Qoe~i%nuKᳶiu"ӑQ./Q/./ʟsM =愒o6<4}k`^2V1vd=#1e3@F_ v2{+2Y)o`dk8 |J[CGrD"P+M?XQb!w#4LN7sK&4eґrS, Bl ᶒwRTf&-tpw;TbΝ5qQw>mĂ8XQw%Պntʞ7 Kl!q]k(UqrkR7f.mJ5x*Qފ[hls$=O,DO&nѾK6G ! @`0M+U7Tѭ/TW:V4&q#f2 C,Nd+;ϵPmR뢺<[a4AR]1c  pc)SBVj2-mN$e{VA5q*N4Mp1zå*9܊-l #d=Nt'*l.J7'w[3Exr c[sqV=uܽ}1S I1% R|Q8I"Sӷye7uJ-[GQ | ^=*uoD!&n^B< lU01hʊ/HuԳ&D?:ՃT[8>!jͯ q’]`1cޞzd0Y6힟[[ j-ΜHزM@CFy|9lRgn0i1_kR5]<zHi1 O\Vқ4[~@ݰd_] \(Xf awIڷ6ƞdB<qA#9GHFY1i<T %ٺ *5PT\F0Tˣi9śX`Pǻ(W>`) (# KANYj4)E4s aοwr6=疕8+x>Q}[8br*Jؔ&:%2Ecz!*T0,UeTrZaZ6&%|F_GzX=~;xlԄahIh% Ĥ"h~:.@qZq"-L2.?1#epg,3ZLy CizS{\,AkB)b($?ꦼ1Mm>]J\₭H>]+4JxevϲY+ 7LY2WJR.o9[[>6\!|jEF/gy؎oD-]*s/CtLלNXUW#dqfcR8$4QkQ4b\]v<ٶA ӤpNbRI9)a7hRg A tNcx#0"0 "4yNGlhfr<BdY{Z*MVwf&v0`#磁|eKI+=ҒM"cXšJ|*7ʏԔG!)x+5vג{uй&djl@ۤ >6JmrIѪ!L9*3ʃXDN!#%Y1Evʅ͙r*T.(+`g5Y'`9;@$^yB6#ܠ22btgbT$NeNT$NeNSrL3IPBXCSQƭh!8:&aucºՊ ^C:Phgcz^OI\z=)/I2-|ݨπ FBF#,!3 e>XK,uUՓWWYx50_ץ"EWk, iYeI-kZɞTiMT *0@c3A8Fe^u1vqfBsWݥ"Y[il:P ISZ6tޡI'iag޴Gg_,jSha 8Dp3,Qoo +dcm5TD3Z>n K4 IaZ5Iж܆6gY?8C,IavJQtA1 P" `m,B%)nFm_.Mi&*[T6Vt?9.c3 ڽ\UH8n_t`˜AӦ&0Du|$y=>w\q 3ᥓWz"1'F3x[3}L5N֊uänM;'6& !ΑZȻ H}Saf:SVLQ"͍$ij bK( :`Ҁ@4$Hq(gS{`i&h>'r᠙ds2r&pw>M`YgMNIL2鴆֟O߯\n+!ގ[lbm{P 3믹!)Z{wWm {f'mnavzk=3Zg򑩭S戗yv\ )"%@ )n'qxw_*D &u)/+s-IusT9y2iZA ,aF-,<~2kj #JGW9[oh&_S'O7Hh,9,y:KjPV mak74 ^'o6^<<Ϥ|˧ϾO˰45VĮ[?wsk~_+@f58.e<=]·,* xi+ w_/[/_\ HG އճ BMj﹯#5LCB A՛M—Y/k7=1bշ'e{ Xw}{դw y _^4б#CEX:ҳh4dIt'su: cϦ XV'8(,ʯi*@ ނƏP'of_a+߾/0+?S7}-?~wvbt>}9̭B$tW]\KA^Yrߖ]^~aқv9ftEi0OP+A Eiɧ5(0QjiL( {guMGa,`gg!1cQ޺KDVC>nߟn߁>0*= [FN JL w ݀ŪBD9hsҚ^5*~VԚ_)Y;I ΊkZtYu:1|־u:H.͚U\j:E2*#iGr:<:8  q_ʻΔ}R=ʔ_*}5ՄߵɓE0Ͼf7Fˣ&b{#h2N|V߷Ѱ64q.dx9(qpKZдLAj贞w;W9 -OZӟC&L=1ix+6>8 7qGWT-G -jVίW8 0Y/PeňCqEYMi~n5#Ǽ1qu"gWcoMrNKOxvΎj+UW Zٸu>[W/Ju@HqbZ~Qq)W9jBL$icu#"K @*\D= QZ $n֣*bO3+ ?B:^ tMc9-8S4bm?fS%}`"[XULQDML񑹒Lo%QJs2>+dqc`1n 1J`ތEq^coKVp᪖uh\ª/4m ֱjpI*UϲTVHsJ[h2:]-pUEf0pMl[3rrXɈt' {k"$Oo9WPstla%[{%aOOڱf4<k}'Sk tsj90c BcM5t(7Q,5LwАY`ԑǧբ(B{t0! 
S=祦Zү`tVu&ɇY喎s-Y˰K/TfaypНXlhQ'MJCq @)DC'րkdgk_?/o:*=h^ `d?9GnH(6ۋD'`qk4']\4l%wf|G~Ϟ.>}䭜.nHj3=!_mӊWl+BO7hRlqLJ&O7]ʢD;VY/T&/8tC"Yi׍ӻX~ZDC75 ͸ c 8*y^d4- 7%Lz1?¼dz"MzcYF"%CgJzEGp^c5q!j[ڤQ&2ZR tXWfW,ZY8k?ߵO8k>>,gs۝x[$xmwٕ޸TlS$Q#Q*kʤ+JVo8 0-JZ C ̛L1r6j A^7T&rv,TTJ\VGL4gQH: pWIN-Ta6Mireّ Yy1IZQ̟ù%6dn$mA1R-\d6cLy3&,]Gn my0gEmLYEB\t$Һtz@^ddHDDBƥ9rXGgi&ȑ*}?0Kr=^E#E<8ϣbRH'4V|t-tuӜj`Ct&g\hϩs'!Zd/uDi-?ZPIjz58N"-70CiQp9),jnH5|KO_UVȎ200+tT-sGL2QlxqaZ~>ea8:cBh3w8ڐfQ|,-:s;'hHO JqlDDF~*3R:}TNEįWDV 彷>u@Q쁢AL&0 Y EgH˰ 4XbA1 1p)\i9C o J(*Qt ֵ !ADm0;+ŦJLTIl0r4Ht#MvP0p p^(BRʼnS F!!cJfN4gF.FKe5 c ]z5 6 TQpC1fyN]Gn|@H+f1CɁR)B N\aY(EܓQsʭa`c*M912^ ,JvM|M@S'hd2/NZӋx WVL5"ᬝ+ ӳv>q懋9IVe)Y$F1ƅjzQlX4$.{. ̱ji LV`v[L-`^>y2VKπ,(gVH~+^m BX|=Xџ%9-ȷ\`atֺCSt9M+-&lhҦX 960^{my/h;@}Q* sD;0Qji2̿Kέ:ȃ&a; Y|ƌEPN"+MnehU:s0뤱1e0YbZl).(2X&0 1 [a\T$axnݮɭ 'oH8X vrӿTQ9` hVZ!Z.YaD%_8D˜{OƱ_!!o_ R}zsbÎc,C閙P$MRmA}g(rxY=^ljeqf8utuUu! UBNw0(TLJ<ޝh|:.*w G0(]FYH=p%g.ֻN\λY($h/`Af_3a7GIss ly)4MFi*"(Cd;5D!Ͽ|e:$c[kI27~|rd[37C=I=BQ*Z*Gߛʻj S7񻏭3|q$ѾOC u a{\bIyL3G{aB8v=YaTd;|X]zn+O 8 ;ƙ;}<xw?(3lpߓ!||rf~{>\w.n<\uƳ{KuqsP6nmSgݞ%s0Z5a~4K÷OI*q7Rv&ʵL ># 杤/zmtG4{ nO0yd'ArmO+G7}ixdnZ؁\_9q/Z՛Md٥ ^g?Wa_౐wM>uƳ{M؂Oi /ݎ~T¬:)>?,\y ,niXlO?v>f~;O/a6cGX ͯ3)\ 9<n`#ts3VA6(f$>?lXxw&8~<<"-^=}=L7Evo7+? lz]A,?%ݮI߿/G/z4ps޼~7a6YvsF/w,\څoN ԜN/36*& \LC)5wX'JfR UP˘{O_EԩiujZ6i}`9Č${D!&.DPKoVNZMS,*IF؎JU*VJ? b"&=&IN' V# ikL{G'$qi"HElZ)B*ۜ,3!I*!UlaQ20BFhI"dl BH 8aهEɖ`Lj-+}8 ީ0" +gqxk+| jBtM1B6`?ݒjA[@x-( Ic"R@P=|UPV0/~w]N *L/2 С`DJ`i񥃳koShK0&8t"KNY7Y"S$\3H5A%I4փd.Dž,3HT4X #>\TUJ2֌%t0C,{]:@bF+ i$#L^ׇ$XP(z]!B#ӇE8}BbH $@@ܾn? FQ#`FݎuӏM;M|G@@@"'oN.&b wPt"PT{TZr0&m 3*xɏ趝GKn lkXLi_l+ :uʃdzE`ZSנVg1F)Ƅ[jaHSuN!g ֒9BO<0(7 l{w.tg3<{ Yо wq3?|'{ P?8| pCw*߀/ƑQXQLdW>#4.4ЬqRu:x$&x8QI߄㺍z6e,ۨ嗀zYZͪtԸS {9B жrUn FvmSUGu[T7wv}!0a(AS $N[0P1n j`)()r0\:'[at'?=bj4u)hHDRg (Rd%ͦ TSO+^ q Vk02FJw2s`$ژBq](9;6s?f90gT+CX _Z}UA@x7b9/F&O;EFuT r-iXxhf s2l(,B9K= MJɢXt+'!rXR* 7(u%91܆I=< k-1jT8Kr䑢kKkK%dB;BN-ej)SKZHnṖVGt꺞ZZtz:u=N]OSөվZXOZ2.qD.b_#s-̰&R NzotXkX3L|~J&wS4j7=t`3ʋ BR98 I nϫ`lY]ku3DdL}.`ʼn<``v)蝶`U {I&$᜸1f1U1bBDQ}ui@%qv[f#0Ÿ&߸"$fva B#SEϋ?/9x”q.`/΂qE #IÒ$TxAE\qjyߘC:'ۋdZ!GHBb(lǏ:)Vha c0-eIJ?% SFպ(e(\#D"XvI]2M=^ `{y\0]}*sU^5`Y{H[d4ԁwDʋ8l5Y'9w[Ǻi/חX99moߓ5<9EHgu ]'k%uGKH Q_0h~ҟ L?gۚK9$dnqRCe~gX_J2ZTOBW8Mo<ޢun9-2UMOli,1a U^]MXDNX$% u u?OF/Ww)Uj㙵e Hw`#vÏ+\$6}? 
I% BrWyhO^y@F^c<쬉IRgljNP(F@ER5 \ݓE"~SIoDh\d-)eæ嵴@"!@=ׁq|F]U&PbWk Pz-lDB-С^qW Sz.E ^-Cӡ ~sZZ3AVbjbS oHJS}b 1aCZ#4q"]P mVƊ!R;[J8Z 8\ .%QvP}y9KӔ#FHqSOa [BcT=\􃱬+ԕ~4: T<Ģ)f{ȁDe}(P8Ak,|©)[C= y{~Ul.nV2`aL ԃ<JqT)KIV@&ii@KR;F쪶8ŰL"-/#8-NfOl=Q22\z֒@d\Lw&X-N< k3#)8;[1 Tf:Sp!KMIH[5 =&',W/] Y@!ԭhoP(ʔP[]@iˆKwИipVm-n# Ta0+*vg vx6ܩ\^S=SluN3݈!L -q_x[ ]~FN7B8Ŏ6ϵF'&Eq;$(uES42`em.NRX вo=Kls,rтTF^ђHt'6{J*ܭsCn˫9w,C}8inI U&}գZ`Ly-*Ϛb =/Z=OwB;LlO~2'Ol աBl2‰o|_vTvxywl٣ȯ@SOs|yBlR Su&; 4[=cI H 3;03s xwxx9j-h`@ae8 7罏p~?{D俊Rreq\1 ]\z [Fwnj-hfVuefU>7ƯS5X'O`2`I.qp:;H¿}߿={᛿yTA$[>,oE~ˣC78d,T^oUe-@Rn.<^n8Iᜓ|}~k<:uGer!oqSf,]6YV,o^=U,;2VkpLZ`5`nmˣx1Yy뮔W_/J,/@.°6qt7e/|Lg=&ѥj4ÿaW$er"}ʅOݩ$<ٞ!Շŋ"-ƃ34G)Vxbp24fR6;y2x PJ2Y)_ۜ\Lx~p8:|IpÊTl?ǣiRc(ET~IOBQasy'iڗGo(-?8v=.aD2έjid䢘"iB \ f0,L.R_84ښ >V(78,^n^lMR\8 ~/odyh͜WV^P)- X3[ײ[YX*$(JmDCLx5kk[wŭ^doC47*#h LRU4YޤypГGV2@ Ш [סCB0`FG-6?\mZ\> cA5_>KjqiJPW˅%y3A3mu9R#mZk׵l^+sWRdz*- N;p鈄ɘ{#CwXJiY %*a{mtXZCC*6 Š6q+h֖zl.)X6rؔз(l޶<`6ӊg6_ 1z ,q:%'C"48 ; ߛ&[^k ҖN'Hr ln$ J=L7 :߹ ; =e1ť ;6MB` M AI6a#MpoǦ\R Ƥ)$[ M?!1RH FX-U5OB:b!Ur#h9g"łB-j[HCPZܘ-+L`=njڃ7TռgvwoqRdg6Z߹W3sT;6UOϡ9`sF h}7xtɕ(dl xt Q+hL,GpѷcbW B.e&WnJݍVR Cd_[ ;,Snaհ #h/$% ED fPJoc}5ia-֗6 o!o@t)]xۅ]x.e ˉo.mvaK&euم.T syjYjgV;Ĭ .ٖA*ދ S$M/mѲ(WEdzxW]hNtaKEiH4U,ƅ:~kpZhP vǚ:3V cӹ}e QA k] 6Jkle]h>]lJ.}kj-a⚳#4^bտ PBIXKpDb&X 8J@WC۔`-~2x'TiQ^(8eCէF%ǂP\ E^_ilS h^Kp ɿGC xk=#RrLIA /<14R >pT7H;߲-;߲o)&Hnu#6~Qv}M~}N`7hlIF 0Ӄ5 ձ-˕m]a4`* E^ɇ`xGU:pHJD0ޔE\)m Ytß hyDU.)NƓp8I"MEDe˞lkS[g+YUuLg(KiG줢b+ڄz&s}^&*WC-""1NTemAz,MT {S d,-Иn8le/ CW?eXWScaeyL_9Cew˄rLנ ^g𸽣1=|`y%VTA}ਈUB1܄W8qKKndP 'h*&f%zMk eH4WE|G;?LRiQKr,}|(H9WZYOrÜs~V۴'i۬'r׈KlQcq0  (CYii,xC-TĐ#:)!Srp /1.AVS3 B2L(*+Q iҤ}ҺpU4Red[<%nɳݒiMlnM!)tSH&r_(& ʢׂ -DQQt}HU boDfu'#TFwi= N12"Tzb*>SWdT8AsN/+4I} BQP`XL ^[ H|ZB::S+ukp23kPưΈ6eeX24djmca2bp^XX5ќ=&;dZOs "8BQ3\RLHP mlɩ7G}e1R(.HM`0+ #VFDNhRcDO0i}9v)J9=4[r%d/{Zcvh89A1Hq,$kL"=bE$EHZX55ME׆+α7VIWrÖmHz}V4*A|c9ql-һX$բrFAr,g'\ 9NAX,Xbqyi˴Cj=ӢؾtCcӻ%]%H_jʍ? .è6sjR29Y }BDŽώ28UTGX$RCN`]Solf=+N0ȅ!'J)Za;9T{;@Q  YՖsbېʌ=" Tr\d l.&6 ʠ"+o"e];-Ўz-^@+qKd+Eo~V$?_%YKu].n嬙H+iNtZ 3;iNtZֹ캖յNtZ21Z}~R֗+uO=ΐ\ֵҰm8R%V+P Vw7#`﯌%[bWFa#j9 dRŗey78d28:9N'ӻeuIpHbλ$ $rQ("L$  ൵3* "91MȰՕrNN< a^9LcϟދJߘ1o^\ E9=_F+Tz;8UOP$@o^1Os% (2{Ǐ2=m*kz̛o=KAI'{jo4 [r,Z{'8m;*/7`F5V]߼g.j7){h2x}y[Hxl՘hDꉡ|p"rDDn8+ WwL% :m#v.qs&(Nw[mHԱ]n mq94hm 9AJ(.AJ(q#k |J YoH PItO>|8[؊UmBTufv 7r'WxRk>7CQ!W?Rq] ,]w`ߢû|wJM>t(`FTy9?;Rt줽Rls912b투A(Iʕ(+ۨd:تUް_*^Jܜ i8zW:mȱNfIu^,G#>i03> ]ڱXO# wey(D {=}R#ZYK]?W oٵs lVWciFIg?: >_v柀sGjO\pq{<q__ ky˭ _&g/e#0|ǫf4v..0Ds^_j5 K7/wC #٠hљzq~:_>ǽ3l$pݟϞoPwϻY]݂o^<ʇ{]o|b/u] c;v0qW}`$.O-c5_g% ^~rou]zM/@s ÍTAd,%xwj!j!M|L6p,=?]lGu/>.XOm(p7~{9rЌnб3FJBpdF' s|>سa?:xl~A~͇ӑhZ유`ܧ7~|]ù_~?ͧˁƅ4ow1 ?N,Kθ%Wn1+Bb:u+o *q? Za>P`*,,` 0ˤ꾯-Ϣ;N{igit)բArl ϶ -!u[R^^^ŝzL? 
var/home/core/zuul-output/logs/kubelet.log0000644000000000000000001666646415136775650017737 0ustar rootroot
Jan 30 00:09:55 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 30 00:09:56 crc kubenswrapper[5119]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 00:09:56 crc kubenswrapper[5119]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 30 00:09:56 crc kubenswrapper[5119]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 00:09:56 crc kubenswrapper[5119]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 00:09:56 crc kubenswrapper[5119]: Flag --pod-infra-container-image has been deprecated, will be removed in 1.35. Image garbage collector will get sandbox image information from CRI.
Jan 30 00:09:56 crc kubenswrapper[5119]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.393326 5119 server.go:212] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396122 5119 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396141 5119 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396146 5119 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396150 5119 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396155 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396159 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396164 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImages
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396168 5119 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396173 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396178 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396185 5119 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396189 5119 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396194 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396199 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396204 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396209 5119 feature_gate.go:328] unrecognized feature gate: DNSNameResolver
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396214 5119 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396218 5119 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396222 5119 feature_gate.go:328] unrecognized feature gate: UpgradeStatus
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396226 5119 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396229 5119 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396233 5119 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396237 5119 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396241 5119 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396245 5119 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396249 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396252 5119 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396257 5119 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396263 5119 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396267 5119 feature_gate.go:328] unrecognized feature gate: NewOLM
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396272 5119 feature_gate.go:328] unrecognized feature gate: ShortCertRotation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396276 5119 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396280 5119 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396285 5119 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396288 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPIController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396291 5119 feature_gate.go:328] unrecognized feature gate: PinnedImages
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396295 5119 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396298 5119 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396302 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396306 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396309 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396313 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396316 5119 feature_gate.go:328] unrecognized feature gate: Example
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396320 5119 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396323 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDC
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396327 5119 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396332 5119 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396386 5119 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396403 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396407 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396411 5119 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396415 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396418 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396422 5119 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396426 5119 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396430 5119 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396433 5119 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396436 5119 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396440 5119 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396444 5119 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396447 5119 feature_gate.go:328] unrecognized feature gate: DualReplica
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396451 5119 feature_gate.go:328] unrecognized feature gate: SignatureStores
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396454 5119 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396457 5119 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396461 5119 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396465 5119 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396470 5119 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396473 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396476 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396481 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396484 5119 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396487 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396490 5119 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396494 5119 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396497 5119 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396500 5119 feature_gate.go:328] unrecognized feature gate: Example2
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396503 5119 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396507 5119 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396510 5119 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396514 5119 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396518 5119 feature_gate.go:328] unrecognized feature gate: OVNObservability
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396521 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396524 5119 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396527 5119 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396531 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396534 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396915 5119 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396920 5119 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396923 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396926 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396930 5119 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396933 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396936 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPIController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396939 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDC
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396942 5119 feature_gate.go:328] unrecognized feature gate: Example2
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396945 5119 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396949 5119 feature_gate.go:328] unrecognized feature gate: OVNObservability
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396954 5119 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396959 5119 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396963 5119 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396967 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396970 5119 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396973 5119 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396976 5119 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396980 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396983 5119 feature_gate.go:328] unrecognized feature gate: SignatureStores
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396986 5119 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396990 5119 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396993 5119 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.396997 5119 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397001 5119 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397006 5119 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397011 5119 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397014 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397018 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397022 5119 feature_gate.go:328] unrecognized feature gate: Example
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397027 5119 feature_gate.go:328] unrecognized feature gate: DNSNameResolver
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397032 5119 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397035 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397039 5119 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397043 5119 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397047 5119 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397051 5119 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397054 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397058 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397061 5119 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397064 5119 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397068 5119 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397071 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397075 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397078 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397082 5119 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397086 5119 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397089 5119 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397093 5119 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397096 5119 feature_gate.go:328] unrecognized feature gate: NewOLM
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397099 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397102 5119 feature_gate.go:328] unrecognized feature gate: DualReplica
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397105 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397108 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397112 5119 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397115 5119 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397118 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397121 5119 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397124 5119 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397128 5119 feature_gate.go:328] unrecognized feature gate: ShortCertRotation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397132 5119 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397135 5119 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397138 5119 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397146 5119 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397150 5119 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397154 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397157 5119 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397160 5119 feature_gate.go:328] unrecognized feature gate: PinnedImages
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397163 5119 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397166 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397169 5119 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397172 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397176 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397179 5119 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397182 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImages
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397186 5119 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397190 5119 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397194 5119 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397198 5119 feature_gate.go:328] unrecognized feature gate: UpgradeStatus
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397202 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397205 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397208 5119 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397211 5119 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397215 5119 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397218 5119 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.397221 5119 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398460 5119 flags.go:64] FLAG: --address="0.0.0.0"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398474 5119 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398484 5119 flags.go:64] FLAG: --anonymous-auth="true"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398490 5119 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398496 5119 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398500 5119 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398505 5119 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398511 5119 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398515 5119 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398522 5119 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398526 5119 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398531 5119 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398535 5119 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398539 5119 flags.go:64] FLAG: --cgroup-root=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398543 5119 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398547 5119 flags.go:64] FLAG: --client-ca-file=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398550 5119 flags.go:64] FLAG: --cloud-config=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398554 5119 flags.go:64] FLAG: --cloud-provider=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398558 5119
flags.go:64] FLAG: --cluster-dns="[]" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398563 5119 flags.go:64] FLAG: --cluster-domain="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398568 5119 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398578 5119 flags.go:64] FLAG: --config-dir="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398582 5119 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398587 5119 flags.go:64] FLAG: --container-log-max-files="5" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398593 5119 flags.go:64] FLAG: --container-log-max-size="10Mi" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398599 5119 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398603 5119 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398607 5119 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398611 5119 flags.go:64] FLAG: --contention-profiling="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398616 5119 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398619 5119 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398624 5119 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398628 5119 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398634 5119 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398638 5119 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398642 5119 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398646 5119 flags.go:64] FLAG: --enable-load-reader="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398650 5119 flags.go:64] FLAG: --enable-server="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398654 5119 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398661 5119 flags.go:64] FLAG: --event-burst="100" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398665 5119 flags.go:64] FLAG: --event-qps="50" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398671 5119 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398675 5119 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398679 5119 flags.go:64] FLAG: --eviction-hard="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398685 5119 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398689 5119 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398693 5119 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398697 5119 flags.go:64] FLAG: --eviction-soft="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 
00:09:56.398702 5119 flags.go:64] FLAG: --eviction-soft-grace-period="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398707 5119 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398711 5119 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398715 5119 flags.go:64] FLAG: --experimental-mounter-path="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398719 5119 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398725 5119 flags.go:64] FLAG: --fail-swap-on="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398729 5119 flags.go:64] FLAG: --feature-gates="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398735 5119 flags.go:64] FLAG: --file-check-frequency="20s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398739 5119 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398743 5119 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398748 5119 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398752 5119 flags.go:64] FLAG: --healthz-port="10248" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398757 5119 flags.go:64] FLAG: --help="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398761 5119 flags.go:64] FLAG: --hostname-override="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398765 5119 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398769 5119 flags.go:64] FLAG: --http-check-frequency="20s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398773 5119 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398777 5119 flags.go:64] FLAG: --image-credential-provider-config="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398781 5119 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398784 5119 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398788 5119 flags.go:64] FLAG: --image-service-endpoint="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398792 5119 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398796 5119 flags.go:64] FLAG: --kube-api-burst="100" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398800 5119 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398804 5119 flags.go:64] FLAG: --kube-api-qps="50" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398810 5119 flags.go:64] FLAG: --kube-reserved="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398814 5119 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398818 5119 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398822 5119 flags.go:64] FLAG: --kubelet-cgroups="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398826 5119 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398830 
5119 flags.go:64] FLAG: --lock-file="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398834 5119 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398838 5119 flags.go:64] FLAG: --log-flush-frequency="5s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398842 5119 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398849 5119 flags.go:64] FLAG: --log-json-split-stream="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398852 5119 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398856 5119 flags.go:64] FLAG: --log-text-split-stream="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398863 5119 flags.go:64] FLAG: --logging-format="text" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398867 5119 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398871 5119 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398876 5119 flags.go:64] FLAG: --manifest-url="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398879 5119 flags.go:64] FLAG: --manifest-url-header="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398885 5119 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398890 5119 flags.go:64] FLAG: --max-open-files="1000000" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398896 5119 flags.go:64] FLAG: --max-pods="110" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398900 5119 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398904 5119 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398908 5119 flags.go:64] FLAG: --memory-manager-policy="None" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398912 5119 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398917 5119 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398921 5119 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398925 5119 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhel" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398942 5119 flags.go:64] FLAG: --node-status-max-images="50" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398946 5119 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398950 5119 flags.go:64] FLAG: --oom-score-adj="-999" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398954 5119 flags.go:64] FLAG: --pod-cidr="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398958 5119 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc2b30e70040205c2536d01ae5c850be1ed2d775cf13249e50328e5085777977" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398967 5119 flags.go:64] FLAG: --pod-manifest-path="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398971 5119 flags.go:64] FLAG: --pod-max-pids="-1" Jan 30 00:09:56 crc 
kubenswrapper[5119]: I0130 00:09:56.398975 5119 flags.go:64] FLAG: --pods-per-core="0" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398979 5119 flags.go:64] FLAG: --port="10250" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398983 5119 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398987 5119 flags.go:64] FLAG: --provider-id="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398991 5119 flags.go:64] FLAG: --qos-reserved="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.398996 5119 flags.go:64] FLAG: --read-only-port="10255" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399000 5119 flags.go:64] FLAG: --register-node="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399004 5119 flags.go:64] FLAG: --register-schedulable="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399009 5119 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399018 5119 flags.go:64] FLAG: --registry-burst="10" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399080 5119 flags.go:64] FLAG: --registry-qps="5" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399087 5119 flags.go:64] FLAG: --reserved-cpus="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399091 5119 flags.go:64] FLAG: --reserved-memory="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399097 5119 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399101 5119 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399105 5119 flags.go:64] FLAG: --rotate-certificates="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399110 5119 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399115 5119 flags.go:64] FLAG: --runonce="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399119 5119 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399124 5119 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399128 5119 flags.go:64] FLAG: --seccomp-default="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399132 5119 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399136 5119 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399141 5119 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399145 5119 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399150 5119 flags.go:64] FLAG: --storage-driver-password="root" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399154 5119 flags.go:64] FLAG: --storage-driver-secure="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399159 5119 flags.go:64] FLAG: --storage-driver-table="stats" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399164 5119 flags.go:64] FLAG: --storage-driver-user="root" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399168 5119 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399174 5119 
flags.go:64] FLAG: --sync-frequency="1m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399178 5119 flags.go:64] FLAG: --system-cgroups="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399183 5119 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399189 5119 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399193 5119 flags.go:64] FLAG: --tls-cert-file="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399198 5119 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399204 5119 flags.go:64] FLAG: --tls-min-version="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399208 5119 flags.go:64] FLAG: --tls-private-key-file="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399212 5119 flags.go:64] FLAG: --topology-manager-policy="none" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399216 5119 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399220 5119 flags.go:64] FLAG: --topology-manager-scope="container" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399225 5119 flags.go:64] FLAG: --v="2" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399231 5119 flags.go:64] FLAG: --version="false" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399240 5119 flags.go:64] FLAG: --vmodule="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399247 5119 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.399251 5119 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399368 5119 feature_gate.go:328] unrecognized feature gate: Example Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399374 5119 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399379 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399383 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399402 5119 feature_gate.go:328] unrecognized feature gate: Example2 Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399406 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399409 5119 feature_gate.go:328] unrecognized feature gate: NewOLM Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399413 5119 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399417 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399420 5119 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399425 5119 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399429 5119 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399434 5119 feature_gate.go:328] unrecognized feature 
gate: GCPCustomAPIEndpoints Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399438 5119 feature_gate.go:328] unrecognized feature gate: DualReplica Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399443 5119 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399449 5119 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399455 5119 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399460 5119 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399465 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399469 5119 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399472 5119 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399476 5119 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399480 5119 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399483 5119 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399487 5119 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399491 5119 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399495 5119 feature_gate.go:328] unrecognized feature gate: UpgradeStatus Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399498 5119 feature_gate.go:328] unrecognized feature gate: PinnedImages Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399502 5119 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399507 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399511 5119 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399514 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399518 5119 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399521 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399525 5119 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399529 5119 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399532 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 
00:09:56.399536 5119 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399540 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDC Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399544 5119 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399547 5119 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399550 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399554 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImages Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399557 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399561 5119 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399566 5119 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399570 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399574 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPIController Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399579 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399582 5119 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399586 5119 feature_gate.go:328] unrecognized feature gate: SignatureStores Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399590 5119 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399593 5119 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399597 5119 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399600 5119 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399603 5119 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399607 5119 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399610 5119 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399614 5119 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399618 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPI Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399621 5119 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399627 5119 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399631 5119 feature_gate.go:328] unrecognized feature gate: 
ConsolePluginContentSecurityPolicy Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399635 5119 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399638 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399642 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfig Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399645 5119 feature_gate.go:328] unrecognized feature gate: DNSNameResolver Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399649 5119 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399653 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399657 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399661 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399664 5119 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399668 5119 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399672 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399675 5119 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399679 5119 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399683 5119 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399687 5119 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399691 5119 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399694 5119 feature_gate.go:328] unrecognized feature gate: OVNObservability Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399700 5119 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399704 5119 feature_gate.go:328] unrecognized feature gate: ShortCertRotation Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399707 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399711 5119 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399715 5119 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.399718 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.400551 5119 feature_gate.go:384] feature gates: {map[DynamicResourceAllocation:false EventedPLEG:false ImageVolume:true KMSv1:true MaxUnavailableStatefulSet:false MinimumKubeletVersion:false MutatingAdmissionPolicy:false NodeSwap:false ProcMountType:true RouteExternalCertificate:true 
SELinuxMount:false ServiceAccountTokenNodeBinding:true StoragePerformantSecurityPolicy:true TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:true UserNamespacesSupport:true VolumeAttributesClass:false]} Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.417043 5119 server.go:530] "Kubelet version" kubeletVersion="v1.33.5" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.417079 5119 server.go:532] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417163 5119 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417170 5119 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417174 5119 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417178 5119 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417183 5119 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417191 5119 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417197 5119 feature_gate.go:328] unrecognized feature gate: SignatureStores Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417201 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417206 5119 feature_gate.go:328] unrecognized feature gate: Example Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417210 5119 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417214 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImages Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417219 5119 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417223 5119 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417227 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417231 5119 feature_gate.go:328] unrecognized feature gate: NewOLM Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417235 5119 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417238 5119 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417242 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDC Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417248 5119 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417254 5119 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417262 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417267 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417272 5119 feature_gate.go:328] unrecognized feature gate: ShortCertRotation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417279 5119 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417284 5119 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417288 5119 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417292 5119 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417297 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417302 5119 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417306 5119 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417310 5119 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417313 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417317 5119 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417320 5119 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417325 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417330 5119 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417334 5119 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417339 5119 feature_gate.go:328] unrecognized feature gate: OVNObservability
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417344 5119 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417349 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417353 5119 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417357 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417362 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417368 5119 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417372 5119 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417377 5119 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417381 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417403 5119 feature_gate.go:328] unrecognized feature gate: DualReplica
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417408 5119 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417413 5119 feature_gate.go:328] unrecognized feature gate: UpgradeStatus
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417417 5119 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417420 5119 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417424 5119 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417428 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417432 5119 feature_gate.go:328] unrecognized feature gate: DNSNameResolver
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417436 5119 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417440 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417444 5119 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417447 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417451 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPIController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417455 5119 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417458 5119 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417462 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417465 5119 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417469 5119 feature_gate.go:328] unrecognized feature gate: Example2
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417473 5119 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417476 5119 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417480 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417483 5119 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417487 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417491 5119 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417495 5119 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417498 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417502 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417506 5119 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417511 5119 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417517 5119 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417521 5119 feature_gate.go:328] unrecognized feature gate: PinnedImages
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417525 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417529 5119 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417533 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417536 5119 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417540 5119 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417543 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417547 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417551 5119 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.417559 5119 feature_gate.go:384] feature gates: {map[DynamicResourceAllocation:false EventedPLEG:false ImageVolume:true KMSv1:true MaxUnavailableStatefulSet:false MinimumKubeletVersion:false MutatingAdmissionPolicy:false NodeSwap:false ProcMountType:true RouteExternalCertificate:true SELinuxMount:false ServiceAccountTokenNodeBinding:true StoragePerformantSecurityPolicy:true TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:true UserNamespacesSupport:true VolumeAttributesClass:false]}
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417701 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417711 5119 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417715 5119 feature_gate.go:328] unrecognized feature gate: PinnedImages
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417720 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDC
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417724 5119 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417728 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417732 5119 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417736 5119 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417740 5119 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417744 5119 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417747 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417751 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417754 5119 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417758 5119 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417762 5119 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417765 5119 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417769 5119 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417773 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417777 5119 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417781 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPIController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417784 5119 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417788 5119 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417792 5119 feature_gate.go:328] unrecognized feature gate: Example2
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417796 5119 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417800 5119 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417803 5119 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417809 5119 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417813 5119 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417816 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417820 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417823 5119 feature_gate.go:328] unrecognized feature gate: DNSNameResolver
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417827 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417833 5119 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417836 5119 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417840 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417843 5119 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417847 5119 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417851 5119 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417854 5119 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417859 5119 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417865 5119 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417868 5119 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417872 5119 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417876 5119 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417880 5119 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417883 5119 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417887 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417890 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417894 5119 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417898 5119 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417902 5119 feature_gate.go:328] unrecognized feature gate: NewOLM
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417906 5119 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417910 5119 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417913 5119 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417917 5119 feature_gate.go:328] unrecognized feature gate: SignatureStores
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417922 5119 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417925 5119 feature_gate.go:328] unrecognized feature gate: InsightsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417929 5119 feature_gate.go:328] unrecognized feature gate: Example
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417933 5119 feature_gate.go:328] unrecognized feature gate: OVNObservability
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417936 5119 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417940 5119 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417944 5119 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417947 5119 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417951 5119 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417955 5119 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417959 5119 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417963 5119 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417966 5119 feature_gate.go:328] unrecognized feature gate: DualReplica
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417970 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417973 5119 feature_gate.go:328] unrecognized feature gate: GatewayAPI
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417977 5119 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417980 5119 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417985 5119 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release.
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417990 5119 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417994 5119 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.417997 5119 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418001 5119 feature_gate.go:328] unrecognized feature gate: UpgradeStatus
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418004 5119 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418008 5119 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418011 5119 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418015 5119 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418018 5119 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418022 5119 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418026 5119 feature_gate.go:328] unrecognized feature gate: ShortCertRotation
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418029 5119 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.418033 5119 feature_gate.go:328] unrecognized feature gate: ManagedBootImages
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.418041 5119 feature_gate.go:384] feature gates: {map[DynamicResourceAllocation:false EventedPLEG:false ImageVolume:true KMSv1:true MaxUnavailableStatefulSet:false MinimumKubeletVersion:false MutatingAdmissionPolicy:false NodeSwap:false ProcMountType:true RouteExternalCertificate:true SELinuxMount:false ServiceAccountTokenNodeBinding:true StoragePerformantSecurityPolicy:true TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:true UserNamespacesSupport:true VolumeAttributesClass:false]}
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.419226 5119 server.go:962] "Client rotation is on, will bootstrap in background"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.425045 5119 bootstrap.go:266] "Unhandled Error" err="part of the existing bootstrap client certificate in /var/lib/kubelet/kubeconfig is expired: 2025-12-03 08:27:53 +0000 UTC" logger="UnhandledError"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.429840 5119 bootstrap.go:101] "Use the bootstrap credentials to request a cert, and set kubeconfig to point to the certificate dir"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.430010 5119 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.436013 5119 server.go:1019] "Starting client certificate rotation"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.436235 5119 certificate_manager.go:422] "Certificate rotation is enabled" logger="kubernetes.io/kube-apiserver-client-kubelet"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.436348 5119 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kube-apiserver-client-kubelet"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.466069 5119 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.471380 5119 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.471499 5119 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.489144 5119 log.go:25] "Validated CRI v1 runtime API"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.537850 5119 log.go:25] "Validated CRI v1 image API"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.540583 5119 server.go:1452] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.545607 5119 fs.go:135] Filesystem UUIDs: map[19e76f87-96b8-4794-9744-0b33dca22d5b:/dev/vda3 2026-01-30-00-03-17-00:/dev/sr0 5eb7c122-420e-4494-80ec-41664070d7b6:/dev/vda4 7B77-95E7:/dev/vda2]
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.545679 5119 fs.go:136] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:45 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:31 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:44 fsType:tmpfs blockSize:0} composefs_0-33:{mountpoint:/ major:0 minor:33 fsType:overlay blockSize:0}]
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.570130 5119 manager.go:217] Machine: {Timestamp:2026-01-30 00:09:56.567117621 +0000 UTC m=+0.581180110 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33649930240 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:80bc4fba336e4ca1bc9d28a8be52a356 SystemUUID:fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42 BootID:0450a234-c8a5-4d6e-a553-22c02a94238f Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:composefs_0-33 DeviceMajor:0 DeviceMinor:33 Capacity:6545408 Type:vfs Inodes:18446744073709551615 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:31 Capacity:16824967168 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16824963072 Type:vfs Inodes:4107657 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6729986048 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:45 Capacity:3364990976 Type:vfs Inodes:821531 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:44 Capacity:1073741824 Type:vfs Inodes:4107657 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:26:f1:c8 Speed:0 Mtu:1500} {Name:br-int MacAddress:b2:a9:9f:57:07:84 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:26:f1:c8 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:ab:74:d4 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:cd:f2:4d Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:25:e7:27 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:bc:89:07 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:2a:42:33:58:fa:ea Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:a2:2d:ac:6d:8f:01 Speed:0 Mtu:1500} {Name:tap0 MacAddress:5a:94:ef:e4:0c:ee Speed:10 Mtu:1500}] Topology:[{Id:0 Memory:33649930240 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}]
UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.570427 5119 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.570674 5119 manager.go:233] Version: {KernelVersion:5.14.0-570.57.1.el9_6.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 9.6.20251021-0 (Plow) DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.573200 5119 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.573256 5119 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.573572 5119 topology_manager.go:138] "Creating topology manager with none policy" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.573591 5119 container_manager_linux.go:306] "Creating device plugin manager" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.573635 5119 manager.go:141] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.574106 5119 server.go:72] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.574484 5119 state_mem.go:36] "Initialized new in-memory state store" 
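The nodeConfig entry above pins this node's hard-eviction thresholds: memory.available < 100Mi as an absolute quantity, and nodefs.available < 10%, imagefs.available < 15%, and the inode signals < 5% as fractions of capacity. A minimal Go sketch of that quantity-or-percentage rule follows; it is an editor's illustration under those assumptions, not kubelet source, and all identifiers are hypothetical.

    // threshold_sketch.go - illustrative only, not kubelet code.
    package main

    import "fmt"

    type threshold struct {
    	signal     string
    	quantity   int64   // absolute bytes; zero when percentage is set
    	percentage float64 // fraction of capacity; zero when quantity is set
    }

    // trips reports whether observed availability falls below the threshold,
    // using the absolute quantity when present and the percentage otherwise.
    func (t threshold) trips(available, capacity int64) bool {
    	if t.quantity > 0 {
    		return available < t.quantity
    	}
    	return float64(available) < t.percentage*float64(capacity)
    }

    func main() {
    	mem := threshold{signal: "memory.available", quantity: 100 << 20} // 100Mi
    	nodefs := threshold{signal: "nodefs.available", percentage: 0.10} // 10%

    	fmt.Println(mem.trips(64<<20, 32<<30))    // true: 64Mi available < 100Mi
    	fmt.Println(nodefs.trips(10<<30, 85<<30)) // false: ~11.8% of nodefs free
    }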
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.574713 5119 server.go:1267] "Using root directory" path="/var/lib/kubelet"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.577059 5119 kubelet.go:491] "Attempting to sync node with API server"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.577087 5119 kubelet.go:386] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.577114 5119 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.577134 5119 kubelet.go:397] "Adding apiserver pod source"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.577160 5119 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.580647 5119 state_checkpoint.go:81] "State checkpoint: restored pod resource state from checkpoint"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.580671 5119 state_mem.go:40] "Initialized new in-memory state store for pod resource information tracking"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.582665 5119 state_checkpoint.go:81] "State checkpoint: restored pod resource state from checkpoint"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.582681 5119 state_mem.go:40] "Initialized new in-memory state store for pod resource information tracking"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.583877 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.584069 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.586595 5119 kuberuntime_manager.go:279] "Container runtime initialized" containerRuntime="cri-o" version="1.33.5-3.rhaos4.20.gitd0ea985.el9" apiVersion="v1"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.587030 5119 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-server-current.pem"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.588100 5119 kubelet.go:953] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592005 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592056 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592073 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592088 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592103 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592118 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592135 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592153 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592179 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592212 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592236 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.592989 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.594212 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.594245 5119 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/image"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.597211 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.198:6443: connect: connection refused
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.627159 5119 watchdog_linux.go:99] "Systemd watchdog is not enabled"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.627273 5119 server.go:1295] "Started kubelet"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.627517 5119 server.go:180] "Starting to listen" address="0.0.0.0" port=10250
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.628139 5119 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.628384 5119 server_v1.go:47] "podresources" method="list" useActivePods=true
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.629360 5119 server.go:255] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 30 00:09:56 crc systemd[1]: Started Kubernetes Kubelet.
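Every control-plane call up to this point (the CSR post, the Service/Node watches, the CSINode lookup) fails with "dial tcp 38.102.83.198:6443: connect: connection refused": the kubelet is up before the API server is listening. The underlying check is a plain TCP dial to the endpoint named in the log. The stdlib-only Go sketch below reproduces that probe; the address comes from the log, the 3-second timeout is an assumption, and this is an editor's illustration rather than kubelet code.

    // probe_sketch.go - illustrative reachability probe, not kubelet code.
    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	addr := "api-int.crc.testing:6443" // API server endpoint from the log
    	conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
    	if err != nil {
    		// This is the state the log captures: watches, CSRs, and lease
    		// updates keep retrying until this dial starts succeeding.
    		fmt.Printf("dial %s failed: %v\n", addr, err)
    		return
    	}
    	conn.Close()
    	fmt.Printf("dial %s succeeded\n", addr)
    }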
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.631515 5119 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.633268 5119 certificate_manager.go:422] "Certificate rotation is enabled" logger="kubernetes.io/kubelet-serving"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.634912 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.635464 5119 volume_manager.go:295] "The desired_state_of_world populator starts"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.635482 5119 volume_manager.go:297] "Starting Kubelet Volume Manager"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.635740 5119 desired_state_of_world_populator.go:150] "Desired state populator starts to run"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.637200 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.641198 5119 server.go:317] "Adding debug handlers to kubelet server"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.642116 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="200ms"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.642235 5119 factory.go:55] Registering systemd factory
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.642285 5119 factory.go:223] Registration of the systemd container factory successfully
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.642188 5119 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.198:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f59b14dadbd0d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.627209485 +0000 UTC m=+0.641271984,LastTimestamp:2026-01-30 00:09:56.627209485 +0000 UTC m=+0.641271984,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.650114 5119 factory.go:153] Registering CRI-O factory
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.650162 5119 factory.go:223] Registration of the crio container factory successfully
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.650466 5119 factory.go:221] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.650497 5119 factory.go:103] Registering Raw factory
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.650517 5119 manager.go:1196] Started watching for new ooms in manager
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.651632 5119 manager.go:319] Starting recovery of all containers
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.683956 5119 manager.go:324] Recovery completed
Jan 30 00:09:56 crc kubenswrapper[5119]: W0130 00:09:56.695608 5119 helpers.go:245] readString: Failed to read "/sys/fs/cgroup/system.slice/ocp-userpasswords.service/memory.min": read /sys/fs/cgroup/system.slice/ocp-userpasswords.service/memory.min: no such device
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.701238 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.703283 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.703325 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.703338 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.703998 5119 cpu_manager.go:222] "Starting CPU manager" policy="none"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.704015 5119 cpu_manager.go:223] "Reconciling" reconcilePeriod="10s"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.704032 5119 state_mem.go:36] "Initialized new in-memory state store"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.714940 5119 policy_none.go:49] "None policy: Start"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.714978 5119 memory_manager.go:186] "Starting memorymanager" policy="None"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.714993 5119 state_mem.go:35] "Initializing new in-memory state store"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726568 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="428b39f5-eb1c-4f65-b7a4-eeb6e84860cc" volumeName="kubernetes.io/configmap/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-iptables-alerter-script" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726665 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726689 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726704 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a208c9c2-333b-4b4a-be0d-bc32ec38a821" volumeName="kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726719 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc85e424-18b2-4924-920b-bd291a8c4b01" volumeName="kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726735 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="16bdd140-dce1-464c-ab47-dd5798d1d256" volumeName="kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726751 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726767 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" volumeName="kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726789 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726806 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a208c9c2-333b-4b4a-be0d-bc32ec38a821" volumeName="kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726820 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b4750666-1362-4001-abd0-6f89964cc621" volumeName="kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726837 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726853 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726869 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7e2c886-118e-43bb-bef1-c78134de392b" volumeName="kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726890 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726906 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b638b8f4bb0070e40528db779baf6a2" volumeName="kubernetes.io/empty-dir/0b638b8f4bb0070e40528db779baf6a2-tmp" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726925 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" volumeName="kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726940 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726955 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7599e0b6-bddf-4def-b7f2-0b32206e8651" volumeName="kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726971 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.726990 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="94a6e063-3d1a-4d44-875d-185291448c31" volumeName="kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727004 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727018 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d7e8f42f-dc0e-424b-bb56-5ec849834888" volumeName="kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727045 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09cfa50b-4138-4585-a53e-64dd3ab73335" volumeName="kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727062 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727076 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727096 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727110 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727131 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727145 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d7e8f42f-dc0e-424b-bb56-5ec849834888" volumeName="kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727160 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727176 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727192 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727206 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727223 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727239 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727255 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31fa8943-81cc-4750-a0b7-0fa9ab5af883" volumeName="kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727271 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727288 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727303 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/secret/fc4541ce-7789-4670-bc75-5c2868e52ce0-webhook-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727320 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31fa8943-81cc-4750-a0b7-0fa9ab5af883" volumeName="kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727334 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b4750666-1362-4001-abd0-6f89964cc621" volumeName="kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727350 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727367 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727383 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="584e1f4a-8205-47d7-8efb-3afc6017c4c9" volumeName="kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727477 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="94a6e063-3d1a-4d44-875d-185291448c31" volumeName="kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727517 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727541 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727555 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727574 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727598 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727622 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727642 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727661 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727679 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="81e39f7b-62e4-4fc9-992a-6535ce127a02" volumeName="kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727692 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b605f283-6f2e-42da-a838-54421690f7d0" volumeName="kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727715 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727732 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727747 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727768 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727789 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="593a3561-7760-45c5-8f91-5aaef7475d0f" volumeName="kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727810 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6077b63e-53a2-4f96-9d56-1ce0324e4913" volumeName="kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727830 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727851 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="34177974-8d82-49d2-a763-391d0df3bbd8" volumeName="kubernetes.io/secret/34177974-8d82-49d2-a763-391d0df3bbd8-metrics-tls" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727870 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727888 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727912 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727934 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0dd0fbac-8c0d-4228-8faa-abbeedabf7db" volumeName="kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727953 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727971 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7599e0b6-bddf-4def-b7f2-0b32206e8651" volumeName="kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.727988 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ce090a97-9ab6-4c40-a719-64ff2acd9778" volumeName="kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728007 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728026 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728044 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="16bdd140-dce1-464c-ab47-dd5798d1d256" volumeName="kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728062 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="34177974-8d82-49d2-a763-391d0df3bbd8" volumeName="kubernetes.io/projected/34177974-8d82-49d2-a763-391d0df3bbd8-kube-api-access-m7xz2" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728082 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="593a3561-7760-45c5-8f91-5aaef7475d0f" volumeName="kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728100 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728119 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="149b3c48-e17c-4a66-a835-d86dabf6ff13" volumeName="kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728145 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="428b39f5-eb1c-4f65-b7a4-eeb6e84860cc" volumeName="kubernetes.io/projected/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-kube-api-access-dsgwk" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728163 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728181 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc85e424-18b2-4924-920b-bd291a8c4b01" volumeName="kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728199 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728221 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728250 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01080b46-74f1-4191-8755-5152a57b3b25" volumeName="kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728271 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6077b63e-53a2-4f96-9d56-1ce0324e4913" volumeName="kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728289 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728307 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728325 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728344 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728361 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728379 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728422 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728442 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" volumeName="kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728460 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728479 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f0bc7fcb0822a2c13eb2d22cd8c0641" volumeName="kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728497 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728513 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" volumeName="kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728530 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728550 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728567 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e093be35-bb62-4843-b2e8-094545761610" volumeName="kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728585 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09cfa50b-4138-4585-a53e-64dd3ab73335" volumeName="kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728603 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728622 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728639 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728655 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b605f283-6f2e-42da-a838-54421690f7d0" volumeName="kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728670 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728688 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728707 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728725 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728743 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" volumeName="kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.728768 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732338 5119 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b1264ac67579ad07e7e9003054d44fe40dd55285a4b2f7dc74e48be1aee0868a/globalmount"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732485 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732588 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6077b63e-53a2-4f96-9d56-1ce0324e4913" volumeName="kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732609 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732624 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732653 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7599e0b6-bddf-4def-b7f2-0b32206e8651" volumeName="kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732694 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b4750666-1362-4001-abd0-6f89964cc621" volumeName="kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732709 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ce090a97-9ab6-4c40-a719-64ff2acd9778" volumeName="kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732726 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732740 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732755 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732770 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7e2c886-118e-43bb-bef1-c78134de392b" volumeName="kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732785 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732798 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732816 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6" seLinuxMountContext=""
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732831 5119 reconstruct.go:130] "Volume
is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732845 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732859 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732872 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b605f283-6f2e-42da-a838-54421690f7d0" volumeName="kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732887 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732924 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="149b3c48-e17c-4a66-a835-d86dabf6ff13" volumeName="kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732938 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732951 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732970 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732985 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.732998 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" volumeName="kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733013 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="584e1f4a-8205-47d7-8efb-3afc6017c4c9" volumeName="kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733030 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733044 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733059 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733074 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ce090a97-9ab6-4c40-a719-64ff2acd9778" volumeName="kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733089 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733103 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733118 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733132 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733148 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01080b46-74f1-4191-8755-5152a57b3b25" volumeName="kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733163 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" volumeName="kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733178 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="17b87002-b798-480a-8e17-83053d698239" 
volumeName="kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733194 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733208 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5ebfebf6-3ecd-458e-943f-bb25b52e2718" volumeName="kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733221 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733234 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f0bc7fcb0822a2c13eb2d22cd8c0641" volumeName="kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-var-run-kubernetes" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733250 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733265 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a52afe44-fb37-46ed-a1f8-bf39727a3cbe" volumeName="kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733301 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733317 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733331 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" volumeName="kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733344 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d7e8f42f-dc0e-424b-bb56-5ec849834888" volumeName="kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733356 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" 
volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733369 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733383 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5ebfebf6-3ecd-458e-943f-bb25b52e2718" volumeName="kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733424 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733439 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-env-overrides" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733454 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="593a3561-7760-45c5-8f91-5aaef7475d0f" volumeName="kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733468 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ee8fbd3-1f81-4666-96da-5afc70819f1a" volumeName="kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733483 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733499 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733513 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733527 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733541 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" 
volumeName="kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733556 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31fa8943-81cc-4750-a0b7-0fa9ab5af883" volumeName="kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733573 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733589 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733604 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733619 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733633 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-ovnkube-identity-cm" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733648 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733662 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733676 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0effdbcf-dd7d-404d-9d48-77536d665a5d" volumeName="kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733690 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="149b3c48-e17c-4a66-a835-d86dabf6ff13" volumeName="kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733705 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f0bc7fcb0822a2c13eb2d22cd8c0641" 
volumeName="kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-ca-trust-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733718 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733734 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01080b46-74f1-4191-8755-5152a57b3b25" volumeName="kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733774 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3a14caf222afb62aaabdc47808b6f944" volumeName="kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733790 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733803 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733818 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733830 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af41de71-79cf-4590-bbe9-9e8b848862cb" volumeName="kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733845 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733859 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20c5c5b4bed930554494851fe3cb2b2a" volumeName="kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733873 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733889 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" 
volumeName="kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733902 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733919 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" volumeName="kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733936 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="42a11a02-47e1-488f-b270-2679d3298b0e" volumeName="kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733951 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="584e1f4a-8205-47d7-8efb-3afc6017c4c9" volumeName="kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.733965 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734005 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734021 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734035 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734051 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0dd0fbac-8c0d-4228-8faa-abbeedabf7db" volumeName="kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734065 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734080 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" volumeName="kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734095 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734109 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734122 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" volumeName="kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734136 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="42a11a02-47e1-488f-b270-2679d3298b0e" volumeName="kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734149 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="81e39f7b-62e4-4fc9-992a-6535ce127a02" volumeName="kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734162 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="81e39f7b-62e4-4fc9-992a-6535ce127a02" volumeName="kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734181 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="94a6e063-3d1a-4d44-875d-185291448c31" volumeName="kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734201 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734219 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734236 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a52afe44-fb37-46ed-a1f8-bf39727a3cbe" volumeName="kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734248 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734262 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/projected/fc4541ce-7789-4670-bc75-5c2868e52ce0-kube-api-access-8nt2j" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734279 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734316 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734335 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734350 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734363 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734380 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734422 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734440 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734456 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734504 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734524 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" volumeName="kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734544 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734561 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734576 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734592 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="16bdd140-dce1-464c-ab47-dd5798d1d256" volumeName="kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734615 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3a14caf222afb62aaabdc47808b6f944" volumeName="kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734662 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734680 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f863fff9-286a-45fa-b8f0-8a86994b8440" volumeName="kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734698 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734719 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" volumeName="kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734737 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" 
volumeName="kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734754 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734773 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734790 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ee8fbd3-1f81-4666-96da-5afc70819f1a" volumeName="kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734808 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc85e424-18b2-4924-920b-bd291a8c4b01" volumeName="kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734825 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734845 5119 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09cfa50b-4138-4585-a53e-64dd3ab73335" volumeName="kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config" seLinuxMountContext="" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734860 5119 reconstruct.go:97] "Volume reconstruction finished" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.734871 5119 reconciler.go:26] "Reconciler: start to sync state" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.735156 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.740427 5119 kubelet_network_linux.go:49] "Initialized iptables rules." protocol="IPv4" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.747589 5119 kubelet_network_linux.go:49] "Initialized iptables rules." protocol="IPv6" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.747650 5119 status_manager.go:230] "Starting to sync pod status with apiserver" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.747678 5119 watchdog_linux.go:127] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." 
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.747691 5119 kubelet.go:2451] "Starting kubelet main sync loop"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.747843 5119 kubelet.go:2475] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.749148 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.767855 5119 manager.go:341] "Starting Device Plugin manager"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.768215 5119 manager.go:517] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.768239 5119 server.go:85] "Starting device plugin registration server"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.768784 5119 eviction_manager.go:189] "Eviction manager: starting control loop"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.768806 5119 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.769188 5119 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.769268 5119 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.769276 5119 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.778549 5119 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="non-existent label \"crio-containers\""
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.778614 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.842875 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="400ms"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.847999 5119 kubelet.go:2537] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.848319 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.849382 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.849469 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.849502 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.850625 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.850797 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.850855 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.851620 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.851667 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.851682 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.852060 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.852103 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.852118 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.852472 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.852560 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.852611 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.853137 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.853169 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.853201 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.853212 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.853269 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.853302 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.854086 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.854133 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.854172 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.855016 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.855046 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.855105 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.855131 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.855056 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.855234 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.856450 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.856477 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.856508 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.857129 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.857180 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.857206 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.857324 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.857349 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.857360 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.858467 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.858539 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.859192 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.859262 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.859282 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.869913 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.870530 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.870570 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.870582 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.870608 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.871177 5119 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.198:6443: connect: connection refused" node="crc" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.894757 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to 
get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.912287 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.932373 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937127 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937189 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937232 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937740 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-tmp-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937813 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-run-kubernetes\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-var-run-kubernetes\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937880 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0b638b8f4bb0070e40528db779baf6a2-tmp\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937923 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-usr-local-bin\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937964 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-auto-backup-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-etcd-auto-backup-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " 
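
The reconciler_common entries that follow show the volume reconciler working through the static pods' desired state: in these entries the host-path volumes appear with "VerifyControllerAttachedVolume started", while the empty-dir volumes appear with "MountVolume started" and later report "MountVolume.SetUp succeeded". A toy sketch of that walk over the desired state (simplified types; not the real operation executor, which runs each operation asynchronously):

    package main

    import "fmt"

    // volume is a toy desired-state entry: in this sketch, host-path volumes only
    // need their attachment verified, while empty-dir volumes need a mount (SetUp).
    type volume struct {
            uniqueName string
            needsMount bool
    }

    // reconcile walks the desired state and, per volume, either verifies the
    // attachment or mounts it, echoing the two kinds of messages in the log.
    func reconcile(desired []volume, mounted map[string]bool) {
            for _, v := range desired {
                    if !v.needsMount {
                            fmt.Printf("VerifyControllerAttachedVolume started for %q\n", v.uniqueName)
                            continue
                    }
                    fmt.Printf("MountVolume started for %q\n", v.uniqueName)
                    mounted[v.uniqueName] = true // stands in for MountVolume.SetUp
                    fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.uniqueName)
            }
    }

    func main() {
            desired := []volume{
                    {"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir", false},
                    {"kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir", true},
            }
            reconcile(desired, map[string]bool{})
    }

Because the real executor hands each operation to its own goroutine, the SetUp confirmations below can land slightly out of timestamp order.
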
pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.937998 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938030 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938064 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938099 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938137 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938313 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-ca-trust-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938488 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-static-pod-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938542 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-log-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938591 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938637 5119 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938644 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938679 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938726 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-cert-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938765 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-data-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938830 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.938876 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.939629 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-tmp-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.939213 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-ca-trust-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.939117 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: 
\"kubernetes.io/empty-dir/0b638b8f4bb0070e40528db779baf6a2-tmp\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.939381 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.939267 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-run-kubernetes\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-var-run-kubernetes\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: I0130 00:09:56.940911 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.942137 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:56 crc kubenswrapper[5119]: E0130 00:09:56.947062 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.040874 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.040948 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.040979 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041036 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-static-pod-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041065 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-log-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc 
kubenswrapper[5119]: I0130 00:09:57.041150 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-log-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041150 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041249 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041301 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-static-pod-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041268 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041318 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041189 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041456 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041494 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041530 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-cert-dir\") pod \"etcd-crc\" (UID: 
\"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041564 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-data-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041578 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041600 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041625 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-cert-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041634 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041661 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041666 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041697 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041702 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041731 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: 
\"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-data-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041741 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-usr-local-bin\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041767 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041777 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-auto-backup-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-etcd-auto-backup-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041801 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041836 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041873 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-usr-local-bin\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.041977 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-auto-backup-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-etcd-auto-backup-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.072057 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.073531 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.073618 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.073640 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.073692 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.074618 5119 kubelet_node_status.go:110] "Unable 
to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.198:6443: connect: connection refused" node="crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.196359 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.214537 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.233702 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.242739 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.244382 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="800ms" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.248291 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:57 crc kubenswrapper[5119]: W0130 00:09:57.261827 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20c5c5b4bed930554494851fe3cb2b2a.slice/crio-dc83aacdb4be0674312f0d433360f1f8afeef16760dd8a70ea1ee62bf14974ae WatchSource:0}: Error finding container dc83aacdb4be0674312f0d433360f1f8afeef16760dd8a70ea1ee62bf14974ae: Status 404 returned error can't find the container with id dc83aacdb4be0674312f0d433360f1f8afeef16760dd8a70ea1ee62bf14974ae Jan 30 00:09:57 crc kubenswrapper[5119]: W0130 00:09:57.267244 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e08c320b1e9e2405e6e0107bdf7eeb4.slice/crio-0771a5ea367529aedb68adbbf039a3d5786ca6dfc7fa305f79908c94aa347d4d WatchSource:0}: Error finding container 0771a5ea367529aedb68adbbf039a3d5786ca6dfc7fa305f79908c94aa347d4d: Status 404 returned error can't find the container with id 0771a5ea367529aedb68adbbf039a3d5786ca6dfc7fa305f79908c94aa347d4d Jan 30 00:09:57 crc kubenswrapper[5119]: W0130 00:09:57.279779 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a14caf222afb62aaabdc47808b6f944.slice/crio-79ef5c241fa9b25d0861bef0caf82a3a6b933bc09fa60f6f4ebd572d3870556c WatchSource:0}: Error finding container 79ef5c241fa9b25d0861bef0caf82a3a6b933bc09fa60f6f4ebd572d3870556c: Status 404 returned error can't find the container with id 79ef5c241fa9b25d0861bef0caf82a3a6b933bc09fa60f6f4ebd572d3870556c Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.280029 5119 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:09:57 crc kubenswrapper[5119]: W0130 00:09:57.291307 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b638b8f4bb0070e40528db779baf6a2.slice/crio-0ccb7c1b20339ddcf47dc3a8409bb7f1c9e2fc3cd09bea071a39e2a985acf7da WatchSource:0}: 
Error finding container 0ccb7c1b20339ddcf47dc3a8409bb7f1c9e2fc3cd09bea071a39e2a985acf7da: Status 404 returned error can't find the container with id 0ccb7c1b20339ddcf47dc3a8409bb7f1c9e2fc3cd09bea071a39e2a985acf7da Jan 30 00:09:57 crc kubenswrapper[5119]: W0130 00:09:57.293017 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f0bc7fcb0822a2c13eb2d22cd8c0641.slice/crio-4af29c045e6447510fcbdb1502566ebd85abec6814a5f5211c4fa16b8c82ae0a WatchSource:0}: Error finding container 4af29c045e6447510fcbdb1502566ebd85abec6814a5f5211c4fa16b8c82ae0a: Status 404 returned error can't find the container with id 4af29c045e6447510fcbdb1502566ebd85abec6814a5f5211c4fa16b8c82ae0a Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.427907 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.475596 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.476781 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.476824 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.476840 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.476866 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.477185 5119 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.198:6443: connect: connection refused" node="crc" Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.598708 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.198:6443: connect: connection refused Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.753487 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"4af29c045e6447510fcbdb1502566ebd85abec6814a5f5211c4fa16b8c82ae0a"} Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.755702 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"0ccb7c1b20339ddcf47dc3a8409bb7f1c9e2fc3cd09bea071a39e2a985acf7da"} Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.757011 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"79ef5c241fa9b25d0861bef0caf82a3a6b933bc09fa60f6f4ebd572d3870556c"} Jan 30 00:09:57 crc 
kubenswrapper[5119]: I0130 00:09:57.758220 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"dc83aacdb4be0674312f0d433360f1f8afeef16760dd8a70ea1ee62bf14974ae"} Jan 30 00:09:57 crc kubenswrapper[5119]: I0130 00:09:57.759594 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"4e08c320b1e9e2405e6e0107bdf7eeb4","Type":"ContainerStarted","Data":"0771a5ea367529aedb68adbbf039a3d5786ca6dfc7fa305f79908c94aa347d4d"} Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.918178 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.928983 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:09:57 crc kubenswrapper[5119]: E0130 00:09:57.998550 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.046063 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="1.6s" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.277583 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.278796 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.278829 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.278838 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.278861 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.279288 5119 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.198:6443: connect: connection refused" node="crc" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.550623 5119 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kube-apiserver-client-kubelet" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.552570 5119 certificate_manager.go:596] "Failed while requesting a signed certificate from 
the control plane" err="cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.598567 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.198:6443: connect: connection refused Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.766180 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d" exitCode=0 Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.766355 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d"} Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.766491 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.768207 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.768263 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.768285 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.768704 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.769083 5119 generic.go:358] "Generic (PLEG): container finished" podID="20c5c5b4bed930554494851fe3cb2b2a" containerID="c43657b4b20e341dcfedff314e3d3465e7cfe7bbf41accc0e0e85f10f9c7b5a4" exitCode=0 Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.769171 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerDied","Data":"c43657b4b20e341dcfedff314e3d3465e7cfe7bbf41accc0e0e85f10f9c7b5a4"} Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.769299 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.769955 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.769985 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.769998 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.770242 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.770657 5119 
generic.go:358] "Generic (PLEG): container finished" podID="4e08c320b1e9e2405e6e0107bdf7eeb4" containerID="c3b5150c83cf2a286e1ea02402aa68603f4b4d47bbd5b7a9b98dd36b6093f984" exitCode=0 Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.770740 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"4e08c320b1e9e2405e6e0107bdf7eeb4","Type":"ContainerDied","Data":"c3b5150c83cf2a286e1ea02402aa68603f4b4d47bbd5b7a9b98dd36b6093f984"} Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.771499 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.771632 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.772189 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.772222 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.772234 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.772387 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.772552 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.772575 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.772587 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.773368 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"26f561423f825ded7c40148a15fe8ae193d72852cc00f4c38d26a15e0459e067"} Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.773430 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd"} Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.776054 5119 generic.go:358] "Generic (PLEG): container finished" podID="0b638b8f4bb0070e40528db779baf6a2" containerID="9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755" exitCode=0 Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.776098 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerDied","Data":"9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755"} Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.776195 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.777028 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.777055 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:58 crc kubenswrapper[5119]: I0130 00:09:58.777066 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:58 crc kubenswrapper[5119]: E0130 00:09:58.777252 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.598326 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.198:6443: connect: connection refused Jan 30 00:09:59 crc kubenswrapper[5119]: E0130 00:09:59.647910 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="3.2s" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.780970 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"24d0493bc380958b22799e8abc55dbf5ba40bd473d68ac50594d66012108fb54"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.781031 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"19610627b078e161daa90c9332e976149504a06e21fdad1a326056db66c382e7"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.781189 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.782205 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.782251 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.782265 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:59 crc kubenswrapper[5119]: E0130 00:09:59.782556 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.783529 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.784431 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"76c06870c12f5d5668f1c7b7bc8ce2a5614be4f6683f4be3deec72bd8c765802"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.784465 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"25355ecadd6dcd398c71e3a7073c9f69211f36b2afac9f2f6984d6df7ac981fa"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.784653 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.784717 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.784743 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.784765 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"a111fb94f1a5c8692743f3b12a7215a12315afa40d7e89530817ba3c83892220"} Jan 30 00:09:59 crc kubenswrapper[5119]: E0130 00:09:59.785071 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.792928 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.792990 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.793011 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.793028 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.797243 5119 generic.go:358] "Generic (PLEG): container finished" podID="20c5c5b4bed930554494851fe3cb2b2a" containerID="1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf" exitCode=0 Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.797338 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerDied","Data":"1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.797500 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.798125 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.798153 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:59 crc 
kubenswrapper[5119]: I0130 00:09:59.798165 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:59 crc kubenswrapper[5119]: E0130 00:09:59.798406 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.813276 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"4e08c320b1e9e2405e6e0107bdf7eeb4","Type":"ContainerStarted","Data":"85041ef0cd373f79431708df645822f3e2297976dc78bf3d5d6c3f2fd983b55b"} Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.813385 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.814080 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.814149 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.814424 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:59 crc kubenswrapper[5119]: E0130 00:09:59.814796 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.879651 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.881378 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.881557 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.881596 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:59 crc kubenswrapper[5119]: I0130 00:09:59.881786 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:59 crc kubenswrapper[5119]: E0130 00:09:59.882385 5119 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.198:6443: connect: connection refused" node="crc" Jan 30 00:10:00 crc kubenswrapper[5119]: E0130 00:10:00.341558 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.198:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.822062 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"b085449eee5b5006ae435572a4c554a45379becb05169559544ecf7d8bb9b8ae"} Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.822423 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 
00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.823265 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.823319 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.823344 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:00 crc kubenswrapper[5119]: E0130 00:10:00.823819 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.826344 5119 generic.go:358] "Generic (PLEG): container finished" podID="20c5c5b4bed930554494851fe3cb2b2a" containerID="bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a" exitCode=0 Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.826444 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerDied","Data":"bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a"} Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.826620 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.826651 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.826747 5119 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.826847 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827121 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827476 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827491 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827697 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827882 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827955 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.827971 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.828029 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.828072 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.828087 5119 kubelet_node_status.go:736] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:00 crc kubenswrapper[5119]: E0130 00:10:00.828677 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:00 crc kubenswrapper[5119]: E0130 00:10:00.829522 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:00 crc kubenswrapper[5119]: E0130 00:10:00.829529 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.829866 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.829924 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:00 crc kubenswrapper[5119]: I0130 00:10:00.829951 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:00 crc kubenswrapper[5119]: E0130 00:10:00.830369 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.837536 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"b9aba26083b8cd47e9a89c003b6fec66d485c32c4e80234a1f3e95d56d86e185"} Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.837595 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"67b9fad7cb88a39c44754f831ea6adeeea20e24f7bf2131cfd0d76dde042924c"} Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.837608 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"e3beffa08faf81da0394c450397815931c72fd49f42fd6218edbaac85fe6528f"} Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.837664 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"1abdada27c369710bf3ba52e3f0e584cecec6ad6f6f11b5757c0d0748d7ae54c"} Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.837702 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.837760 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.838455 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.838500 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:01 crc kubenswrapper[5119]: I0130 00:10:01.838515 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:01 crc kubenswrapper[5119]: E0130 00:10:01.838889 5119 kubelet.go:3336] "No need to 
create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.005750 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.800891 5119 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kube-apiserver-client-kubelet" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.847029 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"1e870376ee035d24f0f3af0d51bbb91b9860fd137c3f88364c555717aca89a87"} Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.847156 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.847246 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.848681 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.848745 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.848771 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.849029 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.849057 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:02 crc kubenswrapper[5119]: I0130 00:10:02.849072 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:02 crc kubenswrapper[5119]: E0130 00:10:02.849245 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:02 crc kubenswrapper[5119]: E0130 00:10:02.849509 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.082919 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.084126 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.084167 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.084179 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.084204 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.135657 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.136041 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.137561 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.137635 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.137658 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:03 crc kubenswrapper[5119]: E0130 00:10:03.138258 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.215596 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.328545 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.850258 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.850539 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.850627 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851304 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851378 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851429 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851666 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851695 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851727 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851744 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851782 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:03 crc kubenswrapper[5119]: I0130 00:10:03.851826 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:03 crc kubenswrapper[5119]: E0130 00:10:03.852111 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the 
cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:03 crc kubenswrapper[5119]: E0130 00:10:03.852415 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:03 crc kubenswrapper[5119]: E0130 00:10:03.852616 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.715005 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-etcd/etcd-crc" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.853741 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.855068 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.857024 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.857121 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.857156 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:04 crc kubenswrapper[5119]: E0130 00:10:04.858648 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.861853 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.861912 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:04 crc kubenswrapper[5119]: I0130 00:10:04.861931 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:04 crc kubenswrapper[5119]: E0130 00:10:04.862502 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:05 crc kubenswrapper[5119]: I0130 00:10:05.580347 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:05 crc kubenswrapper[5119]: I0130 00:10:05.580684 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:05 crc kubenswrapper[5119]: I0130 00:10:05.581959 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:05 crc kubenswrapper[5119]: I0130 00:10:05.582072 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:05 crc kubenswrapper[5119]: I0130 00:10:05.582104 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:05 crc kubenswrapper[5119]: E0130 00:10:05.582605 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:06 crc kubenswrapper[5119]: I0130 
00:10:06.135374 5119 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://localhost:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 30 00:10:06 crc kubenswrapper[5119]: I0130 00:10:06.135582 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://localhost:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 30 00:10:06 crc kubenswrapper[5119]: E0130 00:10:06.779588 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:10:07 crc kubenswrapper[5119]: I0130 00:10:07.041154 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:10:07 crc kubenswrapper[5119]: I0130 00:10:07.041589 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:07 crc kubenswrapper[5119]: I0130 00:10:07.042891 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:07 crc kubenswrapper[5119]: I0130 00:10:07.042955 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:07 crc kubenswrapper[5119]: I0130 00:10:07.042974 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:07 crc kubenswrapper[5119]: E0130 00:10:07.043588 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.069608 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.070014 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.072989 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.073067 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.073082 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:08 crc kubenswrapper[5119]: E0130 00:10:08.073600 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.077661 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.865279 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller 
attach/detach" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.866424 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.866472 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.866487 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:08 crc kubenswrapper[5119]: E0130 00:10:08.866926 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:08 crc kubenswrapper[5119]: I0130 00:10:08.873341 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.565819 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.566190 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.567329 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.567414 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.567431 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:09 crc kubenswrapper[5119]: E0130 00:10:09.567935 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.867553 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.868179 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.868234 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:09 crc kubenswrapper[5119]: I0130 00:10:09.868247 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:09 crc kubenswrapper[5119]: E0130 00:10:09.868720 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:10 crc kubenswrapper[5119]: I0130 00:10:10.447498 5119 trace.go:236] Trace[1594940583]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:10:00.445) (total time: 10001ms): Jan 30 00:10:10 crc kubenswrapper[5119]: Trace[1594940583]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (00:10:10.447) Jan 30 00:10:10 crc kubenswrapper[5119]: Trace[1594940583]: [10.001511064s] [10.001511064s] END Jan 30 00:10:10 crc kubenswrapper[5119]: E0130 00:10:10.447572 5119 reflector.go:200] "Failed to watch" 
err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:10:10 crc kubenswrapper[5119]: I0130 00:10:10.528410 5119 trace.go:236] Trace[373814095]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:10:00.527) (total time: 10001ms): Jan 30 00:10:10 crc kubenswrapper[5119]: Trace[373814095]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (00:10:10.528) Jan 30 00:10:10 crc kubenswrapper[5119]: Trace[373814095]: [10.001229432s] [10.001229432s] END Jan 30 00:10:10 crc kubenswrapper[5119]: E0130 00:10:10.528539 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:10:10 crc kubenswrapper[5119]: I0130 00:10:10.556613 5119 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:10:10 crc kubenswrapper[5119]: I0130 00:10:10.556708 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 30 00:10:10 crc kubenswrapper[5119]: I0130 00:10:10.562009 5119 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:10:10 crc kubenswrapper[5119]: I0130 00:10:10.562079 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.013733 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.013947 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.014809 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.014850 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.014861 
5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:12 crc kubenswrapper[5119]: E0130 00:10:12.015138 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.019283 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:12 crc kubenswrapper[5119]: E0130 00:10:12.849337 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.875065 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.875770 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.875840 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:12 crc kubenswrapper[5119]: I0130 00:10:12.875908 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:12 crc kubenswrapper[5119]: E0130 00:10:12.876513 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:14 crc kubenswrapper[5119]: E0130 00:10:14.001652 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.550011 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.551610 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.551742 5119 trace.go:236] Trace[737321905]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:10:01.091) (total time: 14460ms): Jan 30 00:10:15 crc kubenswrapper[5119]: Trace[737321905]: ---"Objects listed" error:runtimeclasses.node.k8s.io is forbidden: User "system:anonymous" cannot list resource "runtimeclasses" in API group "node.k8s.io" at the cluster scope 14460ms (00:10:15.551) Jan 30 00:10:15 crc kubenswrapper[5119]: Trace[737321905]: [14.460082661s] [14.460082661s] END Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.551690 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the 
namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b14dadbd0d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.627209485 +0000 UTC m=+0.641271984,LastTimestamp:2026-01-30 00:09:56.627209485 +0000 UTC m=+0.641271984,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.551794 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.553341 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.553658 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.560810 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.566884 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.573510 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1566bd494 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeAllocatableEnforced,Message:Updated Node Allocatable limit across pods,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.773885076 +0000 UTC m=+0.787947525,LastTimestamp:2026-01-30 00:09:56.773885076 +0000 UTC m=+0.787947525,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.577958 5119 reflector.go:430] "Caches populated" logger="kubernetes.io/kube-apiserver-client-kubelet" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.581098 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.849444339 +0000 UTC m=+0.863506798,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.595472 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.84949523 +0000 UTC m=+0.863557689,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: 
I0130 00:10:15.601151 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.601146 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b1523773ca\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.849509411 +0000 UTC m=+0.863571870,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.603444 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.851636621 +0000 UTC m=+0.865699070,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.606522 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.851675462 +0000 UTC m=+0.865737921,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.610240 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b1523773ca\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node 
crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.851688483 +0000 UTC m=+0.865750942,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.616706 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.852090622 +0000 UTC m=+0.866153091,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.622773 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.852111443 +0000 UTC m=+0.866173922,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.634010 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.634326 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.635320 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.635364 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.635378 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.635773 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.637616 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b1523773ca\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in 
the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.852125043 +0000 UTC m=+0.866187512,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.642918 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.853156008 +0000 UTC m=+0.867218467,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.648706 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.652103 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.853175068 +0000 UTC m=+0.867237527,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.658865 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b1523773ca\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.853207959 +0000 UTC m=+0.867270418,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.665027 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.85323816 +0000 UTC m=+0.867300659,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.670373 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.853280641 +0000 UTC m=+0.867343140,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.678021 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b1523773ca\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.853312381 +0000 UTC m=+0.867374870,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.683698 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC 
m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.855038093 +0000 UTC m=+0.869100572,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.689051 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b15237007a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b15237007a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703314042 +0000 UTC m=+0.717376491,LastTimestamp:2026-01-30 00:09:56.855087464 +0000 UTC m=+0.869149953,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.697218 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.855119834 +0000 UTC m=+0.869182333,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.704369 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b1523773ca\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b1523773ca default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703343562 +0000 UTC m=+0.717406021,LastTimestamp:2026-01-30 00:09:56.855142815 +0000 UTC m=+0.869205324,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.711168 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59b152374378\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59b152374378 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: 
NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:56.703331192 +0000 UTC m=+0.717393651,LastTimestamp:2026-01-30 00:09:56.855212807 +0000 UTC m=+0.869275266,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.718090 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59b1749db879 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.280471161 +0000 UTC m=+1.294533610,LastTimestamp:2026-01-30 00:09:57.280471161 +0000 UTC m=+1.294533610,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.730102 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b174b83e54 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.282209364 +0000 UTC m=+1.296271833,LastTimestamp:2026-01-30 00:09:57.282209364 +0000 UTC m=+1.296271833,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.734862 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b174c8bd0c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.28329038 +0000 UTC 
m=+1.297352879,LastTimestamp:2026-01-30 00:09:57.28329038 +0000 UTC m=+1.297352879,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.740712 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b175b1d73e openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.298566974 +0000 UTC m=+1.312629473,LastTimestamp:2026-01-30 00:09:57.298566974 +0000 UTC m=+1.312629473,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.749222 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b175b203ee openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.298578414 +0000 UTC m=+1.312640873,LastTimestamp:2026-01-30 00:09:57.298578414 +0000 UTC m=+1.312640873,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.753402 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1a07e8346 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Created,Message:Created container: wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.01662343 +0000 UTC m=+2.030685889,LastTimestamp:2026-01-30 00:09:58.01662343 +0000 UTC m=+2.030685889,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 
+0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.757318 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59b1a0802a72 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container: setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.016731762 +0000 UTC m=+2.030794221,LastTimestamp:2026-01-30 00:09:58.016731762 +0000 UTC m=+2.030794221,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.761444 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1a084eb78 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Created,Message:Created container: kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.01704332 +0000 UTC m=+2.031105789,LastTimestamp:2026-01-30 00:09:58.01704332 +0000 UTC m=+2.031105789,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.764900 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b1a08604c4 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container: setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.017115332 +0000 UTC m=+2.031177791,LastTimestamp:2026-01-30 00:09:58.017115332 +0000 UTC m=+2.031177791,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.768663 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1a09f466e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC 
map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container: setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.018770542 +0000 UTC m=+2.032833001,LastTimestamp:2026-01-30 00:09:58.018770542 +0000 UTC m=+2.032833001,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.772919 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1a16a1f8e openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Started,Message:Started container kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.032064398 +0000 UTC m=+2.046126857,LastTimestamp:2026-01-30 00:09:58.032064398 +0000 UTC m=+2.046126857,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.777077 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1a16ac90d openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Started,Message:Started container wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.032107789 +0000 UTC m=+2.046170248,LastTimestamp:2026-01-30 00:09:58.032107789 +0000 UTC m=+2.046170248,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.781381 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1a17d27c8 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Pulled,Message:Container image 
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.033311688 +0000 UTC m=+2.047374147,LastTimestamp:2026-01-30 00:09:58.033311688 +0000 UTC m=+2.047374147,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.786536 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59b1a1b8bdac openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.037216684 +0000 UTC m=+2.051279143,LastTimestamp:2026-01-30 00:09:58.037216684 +0000 UTC m=+2.051279143,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.790528 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1a1b9a2bd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.037275325 +0000 UTC m=+2.051337784,LastTimestamp:2026-01-30 00:09:58.037275325 +0000 UTC m=+2.051337784,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.795417 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b1a1c0a65e openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.037735006 +0000 UTC m=+2.051797465,LastTimestamp:2026-01-30 00:09:58.037735006 +0000 UTC m=+2.051797465,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.801171 5119 event.go:359] 
"Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1b34863bb openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Created,Message:Created container: cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.331843515 +0000 UTC m=+2.345905974,LastTimestamp:2026-01-30 00:09:58.331843515 +0000 UTC m=+2.345905974,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.806913 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1b433d7b3 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Started,Message:Started container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.347274163 +0000 UTC m=+2.361336622,LastTimestamp:2026-01-30 00:09:58.347274163 +0000 UTC m=+2.361336622,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.811263 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1b44a2a5f openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.348737119 +0000 UTC m=+2.362799578,LastTimestamp:2026-01-30 00:09:58.348737119 +0000 UTC m=+2.362799578,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.815421 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the 
namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1cd777784 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.771136388 +0000 UTC m=+2.785198867,LastTimestamp:2026-01-30 00:09:58.771136388 +0000 UTC m=+2.785198867,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.819061 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b1cd86a51e openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.772131102 +0000 UTC m=+2.786193581,LastTimestamp:2026-01-30 00:09:58.772131102 +0000 UTC m=+2.786193581,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.823666 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59b1cd9bdf50 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.773522256 +0000 UTC m=+2.787584755,LastTimestamp:2026-01-30 00:09:58.773522256 +0000 UTC m=+2.787584755,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.829458 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1cde75f03 
openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.778470147 +0000 UTC m=+2.792532606,LastTimestamp:2026-01-30 00:09:58.778470147 +0000 UTC m=+2.792532606,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.833266 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1d4d4dcaa openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Created,Message:Created container: kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.894697642 +0000 UTC m=+2.908760101,LastTimestamp:2026-01-30 00:09:58.894697642 +0000 UTC m=+2.908760101,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.837406 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1d5c27461 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Started,Message:Started container kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.910268513 +0000 UTC m=+2.924330972,LastTimestamp:2026-01-30 00:09:58.910268513 +0000 UTC m=+2.924330972,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.841843 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1d5d90731 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:58.911747889 +0000 UTC m=+2.925810338,LastTimestamp:2026-01-30 00:09:58.911747889 +0000 UTC m=+2.925810338,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.847090 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1dfce8315 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Created,Message:Created container: kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.078830869 +0000 UTC m=+3.092893328,LastTimestamp:2026-01-30 00:09:59.078830869 +0000 UTC m=+3.092893328,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.851667 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1e0570784 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Created,Message:Created container: kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.087777668 +0000 UTC m=+3.101840127,LastTimestamp:2026-01-30 00:09:59.087777668 +0000 UTC m=+3.101840127,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.855768 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b1e0a6441d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Created,Message:Created container: 
etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.092970525 +0000 UTC m=+3.107032984,LastTimestamp:2026-01-30 00:09:59.092970525 +0000 UTC m=+3.107032984,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.861150 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1e0ad4e08 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Started,Message:Started container kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.093431816 +0000 UTC m=+3.107494285,LastTimestamp:2026-01-30 00:09:59.093431816 +0000 UTC m=+3.107494285,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.866641 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59b1e0ad59de openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Created,Message:Created container: kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.093434846 +0000 UTC m=+3.107497305,LastTimestamp:2026-01-30 00:09:59.093434846 +0000 UTC m=+3.107497305,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.871133 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1e0bd2a68 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.094471272 +0000 UTC m=+3.108533731,LastTimestamp:2026-01-30 00:09:59.094471272 +0000 UTC m=+3.108533731,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.874943 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1e133a288 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Started,Message:Started container kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.102235272 +0000 UTC m=+3.116297731,LastTimestamp:2026-01-30 00:09:59.102235272 +0000 UTC m=+3.116297731,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.878953 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1e178b545 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.106762053 +0000 UTC m=+3.120824512,LastTimestamp:2026-01-30 00:09:59.106762053 +0000 UTC m=+3.120824512,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.881471 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.883847 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.883886 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:15 crc kubenswrapper[5119]: I0130 00:10:15.883898 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.884332 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.886364 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59b1e1f62b48 openshift-machine-config-operator 0 
0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Started,Message:Started container kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.114984264 +0000 UTC m=+3.129046723,LastTimestamp:2026-01-30 00:09:59.114984264 +0000 UTC m=+3.129046723,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.890222 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b1e30a227c openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Started,Message:Started container etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.133069948 +0000 UTC m=+3.147132407,LastTimestamp:2026-01-30 00:09:59.133069948 +0000 UTC m=+3.147132407,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.894273 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1e6771dfc openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Created,Message:Created container: kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.190543868 +0000 UTC m=+3.204606327,LastTimestamp:2026-01-30 00:09:59.190543868 +0000 UTC m=+3.204606327,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.897653 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b1e75f043a openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Started,Message:Started container 
kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.205741626 +0000 UTC m=+3.219804085,LastTimestamp:2026-01-30 00:09:59.205741626 +0000 UTC m=+3.219804085,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.901198 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1ec453fcd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Created,Message:Created container: kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.287939021 +0000 UTC m=+3.302001480,LastTimestamp:2026-01-30 00:09:59.287939021 +0000 UTC m=+3.302001480,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.904763 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.904750 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1ec610c62 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Created,Message:Created container: kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.289760866 +0000 UTC m=+3.303823325,LastTimestamp:2026-01-30 00:09:59.289760866 +0000 UTC m=+3.303823325,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.908179 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1ed0b90e0 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Started,Message:Started container 
kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.300935904 +0000 UTC m=+3.314998373,LastTimestamp:2026-01-30 00:09:59.300935904 +0000 UTC m=+3.314998373,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.911997 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1ed209dc3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.302315459 +0000 UTC m=+3.316377938,LastTimestamp:2026-01-30 00:09:59.302315459 +0000 UTC m=+3.316377938,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.915593 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1ed7ef63d openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Started,Message:Started container kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.308498493 +0000 UTC m=+3.322560952,LastTimestamp:2026-01-30 00:09:59.308498493 +0000 UTC m=+3.322560952,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.919481 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1edaf466f openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.311664751 
+0000 UTC m=+3.325727210,LastTimestamp:2026-01-30 00:09:59.311664751 +0000 UTC m=+3.325727210,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.923134 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1fbbb4644 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Created,Message:Created container: kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.547332164 +0000 UTC m=+3.561394623,LastTimestamp:2026-01-30 00:09:59.547332164 +0000 UTC m=+3.561394623,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.926973 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1fbd7023f openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Created,Message:Created container: kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.549149759 +0000 UTC m=+3.563212218,LastTimestamp:2026-01-30 00:09:59.549149759 +0000 UTC m=+3.563212218,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.930743 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1fc8fdb95 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Started,Message:Started container kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.561264021 +0000 UTC m=+3.575326480,LastTimestamp:2026-01-30 00:09:59.561264021 +0000 UTC m=+3.575326480,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.934484 5119 event.go:359] "Server 
rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b1fca1a338 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.56242924 +0000 UTC m=+3.576491689,LastTimestamp:2026-01-30 00:09:59.56242924 +0000 UTC m=+3.576491689,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.940125 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59b1fcb39cf9 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Started,Message:Started container kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.563607289 +0000 UTC m=+3.577669748,LastTimestamp:2026-01-30 00:09:59.563607289 +0000 UTC m=+3.577669748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.944430 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b208bf3f45 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Created,Message:Created container: kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.765696325 +0000 UTC m=+3.779758794,LastTimestamp:2026-01-30 00:09:59.765696325 +0000 UTC m=+3.779758794,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.948063 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2098cdaff 
openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Started,Message:Started container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.779171071 +0000 UTC m=+3.793233530,LastTimestamp:2026-01-30 00:09:59.779171071 +0000 UTC m=+3.793233530,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.952468 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2099b84e1 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.780132065 +0000 UTC m=+3.794194534,LastTimestamp:2026-01-30 00:09:59.780132065 +0000 UTC m=+3.794194534,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.957449 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b20b27ead5 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.806110421 +0000 UTC m=+3.820172920,LastTimestamp:2026-01-30 00:09:59.806110421 +0000 UTC m=+3.820172920,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.961309 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2194b0c91 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container: kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.043293841 +0000 UTC m=+4.057356300,LastTimestamp:2026-01-30 00:10:00.043293841 +0000 UTC m=+4.057356300,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.965161 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b2198a6e21 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Created,Message:Created container: etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.047447585 +0000 UTC m=+4.061510044,LastTimestamp:2026-01-30 00:10:00.047447585 +0000 UTC m=+4.061510044,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.969560 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b21a1ebab4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.057166516 +0000 UTC m=+4.071228975,LastTimestamp:2026-01-30 00:10:00.057166516 +0000 UTC m=+4.071228975,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.970549 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b21a53fbb3 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Started,Message:Started container etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.060656563 +0000 UTC m=+4.074719022,LastTimestamp:2026-01-30 00:10:00.060656563 +0000 UTC m=+4.074719022,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.973286 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b2484d63aa openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.831976362 +0000 UTC m=+4.846038861,LastTimestamp:2026-01-30 00:10:00.831976362 +0000 UTC m=+4.846038861,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.975632 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b25526d432 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Created,Message:Created container: etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.047553074 +0000 UTC m=+5.061615533,LastTimestamp:2026-01-30 00:10:01.047553074 +0000 UTC m=+5.061615533,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.976698 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b255b58a27 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Started,Message:Started container etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.056905767 +0000 UTC m=+5.070968236,LastTimestamp:2026-01-30 00:10:01.056905767 +0000 UTC m=+5.070968236,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.979767 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b255c34f2a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.05780817 +0000 UTC m=+5.071870629,LastTimestamp:2026-01-30 00:10:01.05780817 +0000 UTC m=+5.071870629,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.983599 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b261a5f40a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Created,Message:Created container: etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.25721089 +0000 UTC m=+5.271273349,LastTimestamp:2026-01-30 00:10:01.25721089 +0000 UTC m=+5.271273349,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.987205 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b2623bd2bd openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Started,Message:Started container etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.267032765 +0000 UTC m=+5.281095224,LastTimestamp:2026-01-30 00:10:01.267032765 +0000 UTC m=+5.281095224,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.991904 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b26248ac00 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.267874816 +0000 UTC m=+5.281937275,LastTimestamp:2026-01-30 00:10:01.267874816 +0000 UTC 
m=+5.281937275,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:15 crc kubenswrapper[5119]: E0130 00:10:15.996117 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b26ed64981 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Created,Message:Created container: etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.478482305 +0000 UTC m=+5.492544764,LastTimestamp:2026-01-30 00:10:01.478482305 +0000 UTC m=+5.492544764,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.000538 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b26faebe86 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Started,Message:Started container etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.492668038 +0000 UTC m=+5.506730497,LastTimestamp:2026-01-30 00:10:01.492668038 +0000 UTC m=+5.506730497,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.004506 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b26fc24f66 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.49395031 +0000 UTC m=+5.508012769,LastTimestamp:2026-01-30 00:10:01.49395031 +0000 UTC m=+5.508012769,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.008648 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b27e90c431 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Created,Message:Created container: etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.742361649 +0000 UTC m=+5.756424128,LastTimestamp:2026-01-30 00:10:01.742361649 +0000 UTC m=+5.756424128,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.012880 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b27fb40454 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Started,Message:Started container etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.761449044 +0000 UTC m=+5.775511503,LastTimestamp:2026-01-30 00:10:01.761449044 +0000 UTC m=+5.775511503,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.017236 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b27fce473b openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.763170107 +0000 UTC m=+5.777232566,LastTimestamp:2026-01-30 00:10:01.763170107 +0000 UTC m=+5.777232566,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.022174 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b28a69d7c0 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Created,Message:Created container: etcd-rev,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.941137344 +0000 UTC m=+5.955199813,LastTimestamp:2026-01-30 00:10:01.941137344 +0000 UTC m=+5.955199813,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.029635 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59b28b2e4bd3 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Started,Message:Started container etcd-rev,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:01.954012115 +0000 UTC m=+5.968074614,LastTimestamp:2026-01-30 00:10:01.954012115 +0000 UTC m=+5.968074614,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.036205 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=< Jan 30 00:10:16 crc kubenswrapper[5119]: &Event{ObjectMeta:{kube-controller-manager-crc.188f59b3846b027c openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://localhost:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Jan 30 00:10:16 crc kubenswrapper[5119]: body: Jan 30 00:10:16 crc kubenswrapper[5119]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:06.13551782 +0000 UTC m=+10.149580319,LastTimestamp:2026-01-30 00:10:06.13551782 +0000 UTC m=+10.149580319,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:16 crc kubenswrapper[5119]: > Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.040130 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b3846d48d7 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://localhost:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:06.135666903 +0000 UTC m=+10.149729402,LastTimestamp:2026-01-30 00:10:06.135666903 +0000 UTC m=+10.149729402,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.044680 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:16 crc kubenswrapper[5119]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b48bf07ab4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403 Jan 30 00:10:16 crc kubenswrapper[5119]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:10:16 crc kubenswrapper[5119]: Jan 30 00:10:16 crc kubenswrapper[5119]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:10.556672692 +0000 UTC m=+14.570735171,LastTimestamp:2026-01-30 00:10:10.556672692 +0000 UTC m=+14.570735171,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:16 crc kubenswrapper[5119]: > Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.048325 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b48bf18e91 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:10.556743313 +0000 UTC m=+14.570805792,LastTimestamp:2026-01-30 00:10:10.556743313 +0000 UTC m=+14.570805792,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.052745 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b48bf07ab4\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:16 crc kubenswrapper[5119]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b48bf07ab4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403 Jan 30 00:10:16 crc kubenswrapper[5119]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get 
path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:10:16 crc kubenswrapper[5119]: Jan 30 00:10:16 crc kubenswrapper[5119]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:10.556672692 +0000 UTC m=+14.570735171,LastTimestamp:2026-01-30 00:10:10.562052361 +0000 UTC m=+14.576114850,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:16 crc kubenswrapper[5119]: > Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.056621 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b48bf18e91\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b48bf18e91 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:10.556743313 +0000 UTC m=+14.570805792,LastTimestamp:2026-01-30 00:10:10.562107463 +0000 UTC m=+14.576169932,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.228786 5119 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38488->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.228926 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38488->192.168.126.11:17697: read: connection reset by peer" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.228809 5119 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38502->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.228997 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38502->192.168.126.11:17697: read: connection reset by peer" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.229277 5119 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection 
refused" start-of-body= Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.229365 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.233736 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:16 crc kubenswrapper[5119]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b5de079357 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:17697/healthz": read tcp 192.168.126.11:38488->192.168.126.11:17697: read: connection reset by peer Jan 30 00:10:16 crc kubenswrapper[5119]: body: Jan 30 00:10:16 crc kubenswrapper[5119]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:16.228885335 +0000 UTC m=+20.242947794,LastTimestamp:2026-01-30 00:10:16.228885335 +0000 UTC m=+20.242947794,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:16 crc kubenswrapper[5119]: > Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.237628 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b5de08928b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38488->192.168.126.11:17697: read: connection reset by peer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:16.228950667 +0000 UTC m=+20.243013126,LastTimestamp:2026-01-30 00:10:16.228950667 +0000 UTC m=+20.243013126,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.241336 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:16 crc kubenswrapper[5119]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b5de08fd76 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Liveness probe error: Get "https://192.168.126.11:17697/healthz": read tcp 192.168.126.11:38502->192.168.126.11:17697: read: connection reset by peer Jan 30 00:10:16 crc kubenswrapper[5119]: body: Jan 30 00:10:16 crc kubenswrapper[5119]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:16.228978038 +0000 UTC m=+20.243040497,LastTimestamp:2026-01-30 00:10:16.228978038 +0000 UTC m=+20.243040497,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:16 crc kubenswrapper[5119]: > Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.245619 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b5de0993d1 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Liveness probe failed: Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38502->192.168.126.11:17697: read: connection reset by peer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:16.229016529 +0000 UTC m=+20.243078998,LastTimestamp:2026-01-30 00:10:16.229016529 +0000 UTC m=+20.243078998,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.249534 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:16 crc kubenswrapper[5119]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b5de0e783a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:17697/healthz": dial tcp 192.168.126.11:17697: connect: connection refused Jan 30 00:10:16 crc kubenswrapper[5119]: body: Jan 30 00:10:16 crc kubenswrapper[5119]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:16.229337146 +0000 UTC m=+20.243399605,LastTimestamp:2026-01-30 00:10:16.229337146 +0000 UTC m=+20.243399605,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:16 crc kubenswrapper[5119]: > Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.254294 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group 
\"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b5de0f9096 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:16.229408918 +0000 UTC m=+20.243471387,LastTimestamp:2026-01-30 00:10:16.229408918 +0000 UTC m=+20.243471387,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.604710 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.779822 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.884736 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/0.log" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.886097 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="b085449eee5b5006ae435572a4c554a45379becb05169559544ecf7d8bb9b8ae" exitCode=255 Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.886157 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"b085449eee5b5006ae435572a4c554a45379becb05169559544ecf7d8bb9b8ae"} Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.886385 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.886918 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.886949 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.886958 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.887209 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:16 crc kubenswrapper[5119]: I0130 00:10:16.887464 5119 scope.go:117] "RemoveContainer" containerID="b085449eee5b5006ae435572a4c554a45379becb05169559544ecf7d8bb9b8ae" Jan 30 00:10:16 crc kubenswrapper[5119]: E0130 00:10:16.893504 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2099b84e1\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the 
namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2099b84e1 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.780132065 +0000 UTC m=+3.794194534,LastTimestamp:2026-01-30 00:10:16.888828594 +0000 UTC m=+20.902891053,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:17 crc kubenswrapper[5119]: E0130 00:10:17.123189 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2194b0c91\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2194b0c91 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container: kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.043293841 +0000 UTC m=+4.057356300,LastTimestamp:2026-01-30 00:10:17.118023992 +0000 UTC m=+21.132086451,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:17 crc kubenswrapper[5119]: E0130 00:10:17.141519 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b21a1ebab4\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b21a1ebab4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.057166516 +0000 UTC m=+4.071228975,LastTimestamp:2026-01-30 00:10:17.135009911 +0000 UTC m=+21.149072370,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.604614 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.891661 5119 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/0.log" Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.893798 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581"} Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.894032 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.894681 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.894733 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:17 crc kubenswrapper[5119]: I0130 00:10:17.894753 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:17 crc kubenswrapper[5119]: E0130 00:10:17.895320 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.603654 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.897579 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.898109 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/0.log" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.899978 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581" exitCode=255 Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.900176 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581"} Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.900354 5119 scope.go:117] "RemoveContainer" containerID="b085449eee5b5006ae435572a4c554a45379becb05169559544ecf7d8bb9b8ae" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.900513 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.901186 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.901223 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.901235 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" 
Jan 30 00:10:18 crc kubenswrapper[5119]: E0130 00:10:18.901649 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:18 crc kubenswrapper[5119]: I0130 00:10:18.901948 5119 scope.go:117] "RemoveContainer" containerID="423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581" Jan 30 00:10:18 crc kubenswrapper[5119]: E0130 00:10:18.902171 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:10:18 crc kubenswrapper[5119]: E0130 00:10:18.909442 5119 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b67d5e23cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,LastTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:19 crc kubenswrapper[5119]: E0130 00:10:19.252735 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Jan 30 00:10:19 crc kubenswrapper[5119]: E0130 00:10:19.396620 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.603071 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.603336 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.604429 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.604497 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.604515 5119 kubelet_node_status.go:736] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.604449 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:19 crc kubenswrapper[5119]: E0130 00:10:19.605174 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.617208 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.903730 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.905112 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.905680 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.905711 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:19 crc kubenswrapper[5119]: I0130 00:10:19.905720 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:19 crc kubenswrapper[5119]: E0130 00:10:19.906083 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:20 crc kubenswrapper[5119]: I0130 00:10:20.602732 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:21 crc kubenswrapper[5119]: E0130 00:10:21.520609 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:10:21 crc kubenswrapper[5119]: I0130 00:10:21.605722 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:21 crc kubenswrapper[5119]: I0130 00:10:21.954276 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:21 crc kubenswrapper[5119]: I0130 00:10:21.955484 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:21 crc kubenswrapper[5119]: I0130 00:10:21.955540 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:21 crc kubenswrapper[5119]: I0130 00:10:21.955556 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:21 crc 
kubenswrapper[5119]: I0130 00:10:21.955599 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:10:21 crc kubenswrapper[5119]: E0130 00:10:21.965665 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Jan 30 00:10:22 crc kubenswrapper[5119]: I0130 00:10:22.603345 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:23 crc kubenswrapper[5119]: I0130 00:10:23.602426 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:24 crc kubenswrapper[5119]: I0130 00:10:24.601359 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:24 crc kubenswrapper[5119]: E0130 00:10:24.818911 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.394120 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.394502 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.395790 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.395861 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.395882 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:25 crc kubenswrapper[5119]: E0130 00:10:25.396642 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.397034 5119 scope.go:117] "RemoveContainer" containerID="423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581" Jan 30 00:10:25 crc kubenswrapper[5119]: E0130 00:10:25.397346 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:10:25 crc kubenswrapper[5119]: E0130 00:10:25.402817 5119 event.go:359] "Server rejected event (will not retry!)" err="events 
\"kube-apiserver-crc.188f59b67d5e23cc\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b67d5e23cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,LastTimestamp:2026-01-30 00:10:25.397293292 +0000 UTC m=+29.411355781,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:25 crc kubenswrapper[5119]: I0130 00:10:25.603652 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:26 crc kubenswrapper[5119]: E0130 00:10:26.257637 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Jan 30 00:10:26 crc kubenswrapper[5119]: I0130 00:10:26.602615 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:26 crc kubenswrapper[5119]: E0130 00:10:26.780016 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.601380 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.894335 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.894613 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.895481 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.895544 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.895555 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:27 crc kubenswrapper[5119]: E0130 00:10:27.895866 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not 
found" node="crc" Jan 30 00:10:27 crc kubenswrapper[5119]: I0130 00:10:27.896153 5119 scope.go:117] "RemoveContainer" containerID="423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581" Jan 30 00:10:27 crc kubenswrapper[5119]: E0130 00:10:27.896364 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:10:27 crc kubenswrapper[5119]: E0130 00:10:27.901270 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b67d5e23cc\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b67d5e23cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,LastTimestamp:2026-01-30 00:10:27.896332379 +0000 UTC m=+31.910394838,Count:3,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:28 crc kubenswrapper[5119]: E0130 00:10:28.304791 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:10:28 crc kubenswrapper[5119]: I0130 00:10:28.601142 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:28 crc kubenswrapper[5119]: I0130 00:10:28.966130 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:28 crc kubenswrapper[5119]: I0130 00:10:28.967433 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:28 crc kubenswrapper[5119]: I0130 00:10:28.967514 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:28 crc kubenswrapper[5119]: I0130 00:10:28.967540 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:28 crc kubenswrapper[5119]: I0130 00:10:28.967584 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:10:28 crc kubenswrapper[5119]: E0130 00:10:28.985337 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User 
\"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Jan 30 00:10:29 crc kubenswrapper[5119]: E0130 00:10:29.138227 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:10:29 crc kubenswrapper[5119]: I0130 00:10:29.602714 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:30 crc kubenswrapper[5119]: I0130 00:10:30.601886 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:31 crc kubenswrapper[5119]: I0130 00:10:31.602863 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:32 crc kubenswrapper[5119]: I0130 00:10:32.605044 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:33 crc kubenswrapper[5119]: E0130 00:10:33.267430 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Jan 30 00:10:33 crc kubenswrapper[5119]: I0130 00:10:33.607166 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:34 crc kubenswrapper[5119]: I0130 00:10:34.606493 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:35 crc kubenswrapper[5119]: I0130 00:10:35.603748 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:35 crc kubenswrapper[5119]: I0130 00:10:35.985992 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:35 crc kubenswrapper[5119]: I0130 00:10:35.987089 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:35 crc kubenswrapper[5119]: I0130 00:10:35.987152 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:35 crc kubenswrapper[5119]: I0130 00:10:35.987164 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:35 crc kubenswrapper[5119]: I0130 00:10:35.987191 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:10:35 crc kubenswrapper[5119]: E0130 00:10:35.997217 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Jan 30 00:10:36 crc kubenswrapper[5119]: I0130 00:10:36.603548 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:36 crc kubenswrapper[5119]: E0130 00:10:36.781359 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:10:37 crc kubenswrapper[5119]: I0130 00:10:37.605287 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:38 crc kubenswrapper[5119]: I0130 00:10:38.605097 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:39 crc kubenswrapper[5119]: I0130 00:10:39.606490 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:39 crc kubenswrapper[5119]: E0130 00:10:39.880649 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:10:40 crc kubenswrapper[5119]: E0130 00:10:40.276907 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Jan 30 00:10:40 crc kubenswrapper[5119]: I0130 00:10:40.602243 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:40 crc kubenswrapper[5119]: I0130 00:10:40.748527 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:40 crc kubenswrapper[5119]: I0130 00:10:40.749469 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:40 crc kubenswrapper[5119]: I0130 00:10:40.749525 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:40 crc kubenswrapper[5119]: I0130 00:10:40.749537 
5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:40 crc kubenswrapper[5119]: E0130 00:10:40.749954 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:40 crc kubenswrapper[5119]: I0130 00:10:40.750239 5119 scope.go:117] "RemoveContainer" containerID="423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581" Jan 30 00:10:40 crc kubenswrapper[5119]: E0130 00:10:40.757253 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2099b84e1\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2099b84e1 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:59.780132065 +0000 UTC m=+3.794194534,LastTimestamp:2026-01-30 00:10:40.75122069 +0000 UTC m=+44.765283149,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:40 crc kubenswrapper[5119]: E0130 00:10:40.923741 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2194b0c91\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2194b0c91 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container: kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.043293841 +0000 UTC m=+4.057356300,LastTimestamp:2026-01-30 00:10:40.917952454 +0000 UTC m=+44.932014923,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:40 crc kubenswrapper[5119]: E0130 00:10:40.933180 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b21a1ebab4\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b21a1ebab4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container 
Jan 30 00:10:40 crc kubenswrapper[5119]: E0130 00:10:40.933180 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b21a1ebab4\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b21a1ebab4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.057166516 +0000 UTC m=+4.071228975,LastTimestamp:2026-01-30 00:10:40.927828852 +0000 UTC m=+44.941891311,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.079599 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log"
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.081083 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c"}
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.081464 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.082060 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.082204 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.082294 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:41 crc kubenswrapper[5119]: E0130 00:10:41.082813 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:41 crc kubenswrapper[5119]: I0130 00:10:41.602615 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.085285 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.086584 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.088798 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c" exitCode=255
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.088863 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c"}
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.088904 5119 scope.go:117] "RemoveContainer" containerID="423bce264b4ea6214812555afabbb25b5896d935e39400d4271d69f481081581"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.089221 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.089999 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.090045 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.090085 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:42 crc kubenswrapper[5119]: E0130 00:10:42.090613 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.090999 5119 scope.go:117] "RemoveContainer" containerID="7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c"
Jan 30 00:10:42 crc kubenswrapper[5119]: E0130 00:10:42.091318 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:42 crc kubenswrapper[5119]: E0130 00:10:42.099503 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b67d5e23cc\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b67d5e23cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,LastTimestamp:2026-01-30 00:10:42.091260833 +0000 UTC m=+46.105323302,Count:4,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.605641 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.997717 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.998960 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.999032 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.999052 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
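The "back-off 20s restarting failed container" message above is the kubelet's container restart back-off: by default it starts at 10s, doubles on each crash, and is capped at five minutes, so a container that keeps exiting (here with code 255) settles into CrashLoopBackOff. An illustrative model of that progression, assuming those defaults (this is not kubelet source):

package main

import (
	"fmt"
	"time"
)

// backOffDelay returns the assumed delay before restart attempt n
// (1-based), mirroring the kubelet's default 10s initial / 300s
// maximum container back-off.
func backOffDelay(n int) time.Duration {
	const (
		initial = 10 * time.Second
		max     = 5 * time.Minute
	)
	d := initial
	for i := 1; i < n; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash #%d -> back-off %v\n", n, backOffDelay(n))
	}
	// crash #2 prints "back-off 20s", matching the pod_workers.go message.
}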
Jan 30 00:10:42 crc kubenswrapper[5119]: I0130 00:10:42.999090 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:43 crc kubenswrapper[5119]: E0130 00:10:43.014715 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:43 crc kubenswrapper[5119]: I0130 00:10:43.094808 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log"
Jan 30 00:10:43 crc kubenswrapper[5119]: I0130 00:10:43.603826 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:44 crc kubenswrapper[5119]: I0130 00:10:44.604721 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.394029 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.394329 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.395341 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.395385 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.395416 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:45 crc kubenswrapper[5119]: E0130 00:10:45.395775 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.396088 5119 scope.go:117] "RemoveContainer" containerID="7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c"
Jan 30 00:10:45 crc kubenswrapper[5119]: E0130 00:10:45.396309 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:45 crc kubenswrapper[5119]: E0130 00:10:45.401634 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b67d5e23cc\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b67d5e23cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,LastTimestamp:2026-01-30 00:10:45.396272323 +0000 UTC m=+49.410334792,Count:5,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:45 crc kubenswrapper[5119]: I0130 00:10:45.605033 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:46 crc kubenswrapper[5119]: I0130 00:10:46.603441 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:46 crc kubenswrapper[5119]: E0130 00:10:46.782471 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:10:47 crc kubenswrapper[5119]: I0130 00:10:47.047851 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 30 00:10:47 crc kubenswrapper[5119]: I0130 00:10:47.048052 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:47 crc kubenswrapper[5119]: I0130 00:10:47.048898 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:47 crc kubenswrapper[5119]: I0130 00:10:47.048975 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:47 crc kubenswrapper[5119]: I0130 00:10:47.049002 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:47 crc kubenswrapper[5119]: E0130 00:10:47.049643 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:47 crc kubenswrapper[5119]: E0130 00:10:47.283869 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:47 crc kubenswrapper[5119]: I0130 00:10:47.607057 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
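"Failed to ensure lease exists, will retry ... interval=7s" is the node heartbeat path: the kubelet renews a coordination.k8s.io Lease named after the node in the kube-node-lease namespace, and as system:anonymous it may not even read it. A minimal sketch, assuming working credentials in $KUBECONFIG, that inspects the Lease the kubelet is trying to maintain:

package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Kubelets heartbeat by renewing a Lease named after the node.
	lease, err := client.CoordinationV1().Leases("kube-node-lease").
		Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		// While registration is failing, this is the same
		// forbidden/not-found family of errors the controller logs.
		fmt.Fprintln(os.Stderr, "lease lookup failed:", err)
		return
	}
	if lease.Spec.HolderIdentity != nil && lease.Spec.RenewTime != nil {
		fmt.Printf("holder=%s last renew=%s\n",
			*lease.Spec.HolderIdentity, lease.Spec.RenewTime.Time)
	}
}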
Jan 30 00:10:48 crc kubenswrapper[5119]: E0130 00:10:48.371969 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
Jan 30 00:10:48 crc kubenswrapper[5119]: I0130 00:10:48.605956 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:49 crc kubenswrapper[5119]: I0130 00:10:49.604077 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:50 crc kubenswrapper[5119]: I0130 00:10:50.015012 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:50 crc kubenswrapper[5119]: I0130 00:10:50.016021 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:50 crc kubenswrapper[5119]: I0130 00:10:50.016065 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:50 crc kubenswrapper[5119]: I0130 00:10:50.016077 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:50 crc kubenswrapper[5119]: I0130 00:10:50.016099 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:50 crc kubenswrapper[5119]: E0130 00:10:50.025675 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:50 crc kubenswrapper[5119]: I0130 00:10:50.602608 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.082417 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.082657 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.083534 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.083590 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.083602 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:51 crc kubenswrapper[5119]: E0130 00:10:51.083920 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.084145 5119 scope.go:117] "RemoveContainer" containerID="7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c"
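Each "Failed to watch" line (*v1.CSIDriver and *v1.Node above, *v1.Service and *v1.RuntimeClass below) comes from a client-go reflector inside the kubelet's shared informer factory (reflector="k8s.io/client-go/informers/factory.go:160"): the reflector's initial LIST is refused, the failure is routed through the UnhandledError logger, and the reflector retries with back-off. A minimal sketch of the same list-and-cache machinery for Services, assuming working credentials (illustrative, not the kubelet's own wiring):

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	// Requesting the lister registers the Service informer with the factory.
	svcLister := factory.Core().V1().Services().Lister()

	factory.Start(ctx.Done())            // spawns the reflectors (LIST, then WATCH)
	factory.WaitForCacheSync(ctx.Done()) // blocks until the initial LIST succeeds

	svcs, err := svcLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("cached %d services\n", len(svcs))
}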
Jan 30 00:10:51 crc kubenswrapper[5119]: E0130 00:10:51.084328 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:51 crc kubenswrapper[5119]: E0130 00:10:51.091437 5119 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b67d5e23cc\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b67d5e23cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:18.902135756 +0000 UTC m=+22.916198235,LastTimestamp:2026-01-30 00:10:51.084297796 +0000 UTC m=+55.098360255,Count:6,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:51 crc kubenswrapper[5119]: E0130 00:10:51.125692 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
Jan 30 00:10:51 crc kubenswrapper[5119]: I0130 00:10:51.602723 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:52 crc kubenswrapper[5119]: I0130 00:10:52.602009 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:53 crc kubenswrapper[5119]: I0130 00:10:53.605574 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:54 crc kubenswrapper[5119]: E0130 00:10:54.077838 5119 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass"
Jan 30 00:10:54 crc kubenswrapper[5119]: E0130 00:10:54.288196 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:54 crc kubenswrapper[5119]: I0130 00:10:54.604883 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:55 crc kubenswrapper[5119]: I0130 00:10:55.602741 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:56 crc kubenswrapper[5119]: I0130 00:10:56.601144 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:56 crc kubenswrapper[5119]: E0130 00:10:56.782792 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:10:57 crc kubenswrapper[5119]: I0130 00:10:57.026067 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:57 crc kubenswrapper[5119]: I0130 00:10:57.026907 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:57 crc kubenswrapper[5119]: I0130 00:10:57.026961 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:57 crc kubenswrapper[5119]: I0130 00:10:57.026971 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:57 crc kubenswrapper[5119]: I0130 00:10:57.027010 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:57 crc kubenswrapper[5119]: E0130 00:10:57.035185 5119 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:57 crc kubenswrapper[5119]: I0130 00:10:57.603302 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:58 crc kubenswrapper[5119]: I0130 00:10:58.601570 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:59 crc kubenswrapper[5119]: I0130 00:10:59.601916 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:11:00 crc kubenswrapper[5119]: I0130 00:11:00.602580 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
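What finally breaks the anonymous loop is client-certificate bootstrap: the kubelet files a CertificateSigningRequest for the kubernetes.io/kube-apiserver-client-kubelet signer, the CSR is approved and issued (the csr-ggzcw lines just below), and the kubelet rotates onto the new credentials. A minimal sketch, assuming admin credentials, that lists kubelet client CSRs and whether each has been issued:

package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	csrs, err := client.CertificatesV1().CertificateSigningRequests().
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, csr := range csrs.Items {
		// Keep only kubelet client-certificate requests like csr-ggzcw.
		if csr.Spec.SignerName != "kubernetes.io/kube-apiserver-client-kubelet" {
			continue
		}
		status := "Pending"
		for _, c := range csr.Status.Conditions {
			status = string(c.Type) // Approved or Denied
		}
		// A non-empty Status.Certificate is the "issued" state in the log.
		issued := len(csr.Status.Certificate) > 0
		fmt.Printf("%s status=%s issued=%v\n", csr.Name, status, issued)
	}
}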
\"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Jan 30 00:11:01 crc kubenswrapper[5119]: I0130 00:11:01.603325 5119 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:11:01 crc kubenswrapper[5119]: I0130 00:11:01.617520 5119 csr.go:274] "Certificate signing request is approved, waiting to be issued" logger="kubernetes.io/kube-apiserver-client-kubelet" csr="csr-ggzcw" Jan 30 00:11:01 crc kubenswrapper[5119]: I0130 00:11:01.624499 5119 csr.go:270] "Certificate signing request is issued" logger="kubernetes.io/kube-apiserver-client-kubelet" csr="csr-ggzcw" Jan 30 00:11:01 crc kubenswrapper[5119]: I0130 00:11:01.661846 5119 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 30 00:11:02 crc kubenswrapper[5119]: I0130 00:11:02.436674 5119 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 30 00:11:02 crc kubenswrapper[5119]: I0130 00:11:02.626963 5119 certificate_manager.go:715] "Certificate rotation deadline determined" logger="kubernetes.io/kube-apiserver-client-kubelet" expiration="2026-03-01 00:06:01 +0000 UTC" deadline="2026-02-23 04:12:43.93017565 +0000 UTC" Jan 30 00:11:02 crc kubenswrapper[5119]: I0130 00:11:02.627065 5119 certificate_manager.go:431] "Waiting for next certificate rotation" logger="kubernetes.io/kube-apiserver-client-kubelet" sleep="580h1m41.303118732s" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.036192 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.037122 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.037153 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.037164 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.037238 5119 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.044631 5119 kubelet_node_status.go:127] "Node was previously registered" node="crc" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.044940 5119 kubelet_node_status.go:81] "Successfully registered node" node="crc" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.044969 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.047665 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.047705 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.047715 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.047729 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.047748 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:04Z","lastTransitionTime":"2026-01-30T00:11:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.061928 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.068819 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.068867 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.068879 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.068896 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.068919 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:04Z","lastTransitionTime":"2026-01-30T00:11:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.085483 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.085527 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
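Registration now succeeds, but the status patch above is rejected before it reaches storage: the API server must consult the node.network-node-identity.openshift.io webhook, and its backend on 127.0.0.1:9743 is not listening yet ("connect: connection refused"), which is consistent with the network plugin still being reported as not ready. A minimal sketch, assumed to run on the node itself, that checks whether the webhook backend has started accepting connections:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Same address the API server dials in the patch failures above.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		// While the backend is down this mirrors the webhook error.
		fmt.Println("webhook endpoint not reachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is accepting connections")
}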
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.085540 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.085556 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.085567 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:04Z","lastTransitionTime":"2026-01-30T00:11:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.103632 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.103656 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.103664 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.103713 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.103724 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:04Z","lastTransitionTime":"2026-01-30T00:11:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.112972 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.113131 5119 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.113154 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.213815 5119 
kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.314384 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.415072 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.515936 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.616536 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.716825 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.748628 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.749312 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.749354 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.749366 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.750010 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:11:04 crc kubenswrapper[5119]: I0130 00:11:04.750249 5119 scope.go:117] "RemoveContainer" containerID="7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.817245 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:04 crc kubenswrapper[5119]: E0130 00:11:04.918267 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.019370 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.120165 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: I0130 00:11:05.155226 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log" Jan 30 00:11:05 crc kubenswrapper[5119]: I0130 00:11:05.156483 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09"} Jan 30 00:11:05 crc kubenswrapper[5119]: I0130 00:11:05.156656 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:11:05 crc kubenswrapper[5119]: I0130 00:11:05.157133 5119 kubelet_node_status.go:736] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:05 crc kubenswrapper[5119]: I0130 00:11:05.157177 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:05 crc kubenswrapper[5119]: I0130 00:11:05.157191 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.157740 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.221196 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.322277 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.423426 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.524281 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.632411 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.733324 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.833855 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5119]: E0130 00:11:05.934588 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.034844 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.135755 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.236761 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.337547 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.438150 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.539256 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.640318 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.741033 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.783602 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 
00:11:06.841738 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5119]: E0130 00:11:06.942137 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.042434 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.143450 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.161816 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.162242 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.163724 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09" exitCode=255 Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.163785 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09"} Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.163823 5119 scope.go:117] "RemoveContainer" containerID="7b8b5696bbb27e63d50a4b8217da8f0cdb39a1af5aaabac4f18f16f1f859140c" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.164063 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.164628 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.164667 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.164680 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.166236 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:11:07 crc kubenswrapper[5119]: I0130 00:11:07.166573 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.166892 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.244294 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" 
not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.345235 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.445335 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.545835 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.646279 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.746660 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.846946 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5119]: E0130 00:11:07.947969 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.048296 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.148910 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: I0130 00:11:08.167058 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.249976 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.350889 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.451553 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.551782 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.652448 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.752882 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.853212 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5119]: E0130 00:11:08.954236 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.054360 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.155119 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.255620 5119 
kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.356521 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.457646 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.557753 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.658157 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.758695 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.859824 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5119]: E0130 00:11:09.960603 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.060701 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.161428 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.261966 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.362599 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.463532 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.564133 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.664789 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.765307 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.865461 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:10 crc kubenswrapper[5119]: E0130 00:11:10.966310 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.066419 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.167421 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.267720 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 
00:11:11.368869 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.469204 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.570119 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.670892 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.771602 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.872017 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:11 crc kubenswrapper[5119]: E0130 00:11:11.972332 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.072767 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.173270 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.274374 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.375373 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.475484 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.575689 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.676369 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.776871 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.877577 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:12 crc kubenswrapper[5119]: E0130 00:11:12.977886 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.078525 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.179448 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.280412 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.380884 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc 
kubenswrapper[5119]: E0130 00:11:13.481183 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.582023 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.682873 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.783436 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.884526 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5119]: E0130 00:11:13.985888 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.087076 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.129343 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.132408 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.132550 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.132788 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.132949 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.133044 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:14Z","lastTransitionTime":"2026-01-30T00:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.141298 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.147443 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.147482 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.147492 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.147506 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.147515 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:14Z","lastTransitionTime":"2026-01-30T00:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.155525 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.161762 5119 
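Every retry in this stretch dies the same way: the API server cannot POST the admission review to the node.network-node-identity webhook because nothing is listening on 127.0.0.1:9743 yet. A minimal standalone Go sketch follows (a hypothetical diagnostic, not kubelet or OVN code; the URL and 10s timeout are copied from the log line) that distinguishes "webhook up" from the connection-refused failure seen here:

```go
// webhookprobe.go: hypothetical diagnostic, not part of kubelet. It POSTs to
// the same URL the log shows failing and classifies the result, confirming
// whether anything is listening behind 127.0.0.1:9743 yet.
package main

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net/http"
	"syscall"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 10 * time.Second, // mirrors the ?timeout=10s in the webhook URL
		Transport: &http.Transport{
			// Bootstrap webhooks serve cluster-internal certs; we only care
			// about TCP reachability here, so skip verification.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Post("https://127.0.0.1:9743/node?timeout=10s", "application/json", nil)
	switch {
	case err == nil:
		resp.Body.Close()
		fmt.Println("webhook is listening, HTTP", resp.StatusCode)
	case errors.Is(err, syscall.ECONNREFUSED):
		fmt.Println("dial tcp 127.0.0.1:9743: connect: connection refused (server not up yet)")
	default:
		fmt.Println("other failure:", err)
	}
}
```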
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.161805 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.161817 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.161840 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.161855 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:14Z","lastTransitionTime":"2026-01-30T00:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.169712 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.175647 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.175738 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.175762 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.175795 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.175819 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:14Z","lastTransitionTime":"2026-01-30T00:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.186085 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.186202 5119 
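The "exceeds retry count" entry above marks the end of one bounded update attempt: kubelet tries the status patch a fixed number of times per sync, logging "will retry" on each failure, then gives up until the next cycle. The following schematic Go sketch illustrates that control flow; it is illustrative code, not the actual kubelet_node_status.go source, and nodeStatusUpdateRetry = 5 is an assumption based on current kubelet releases.

```go
// retryloop.go: schematic sketch of the bounded retry implied by the messages
// above ("Error updating node status, will retry" repeated, then "update node
// status exceeds retry count"). Not the actual kubelet source.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // assumed kubelet constant

// tryUpdateNodeStatus stands in for the PATCH the log shows failing: the
// node.network-node-identity webhook refuses every connection.
func tryUpdateNodeStatus() error {
	return errors.New(`Post "https://127.0.0.1:9743/node?timeout=10s": dial tcp 127.0.0.1:9743: connect: connection refused`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		err := tryUpdateNodeStatus()
		if err == nil {
			return nil
		}
		fmt.Printf("Error updating node status, will retry: %v\n", err)
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}
```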
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.188197 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.288303 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.389186 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.489655 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.590445 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.691582 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.748244 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.749378 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.749461 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:14 crc kubenswrapper[5119]: I0130 00:11:14.749487 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.749935 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.791889 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.892620 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:14 crc kubenswrapper[5119]: E0130 00:11:14.993064 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.093440 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.157506 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.157754 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.158622 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.158715 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.158735 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.159335 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.159686 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.159981 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.194008 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.294617 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.393830 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.394070 5119 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.394748 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.394805 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.394839 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.394850 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.395298 5119 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:11:15 crc kubenswrapper[5119]: I0130 00:11:15.395564 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.395821 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.495443 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.595702 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.695860 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:11:15 crc kubenswrapper[5119]: E0130 00:11:15.796134 5119 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
[the "Error getting the current node from lister" entry above repeats at roughly 100ms intervals from 00:11:15.896513 through 00:11:22.842014; those ~70 identical repetitions are omitted, and the entries interleaved with them are kept below]
Jan 30 00:11:16 crc kubenswrapper[5119]: E0130 00:11:16.784702 5119 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:11:17 crc kubenswrapper[5119]: I0130 00:11:17.358007 5119 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.913689 5119 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.938036 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.956218 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.956290 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.956308 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.956329 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.956351 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.970850 5119 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" Jan 30 00:11:22 crc kubenswrapper[5119]: I0130 00:11:22.978532 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.060657 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.060714 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.060732 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.060752 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.060768 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.068023 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.162681 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.162728 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.162739 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.162755 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.162767 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.170663 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-etcd/etcd-crc" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.264363 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.264426 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.264439 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.264453 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.264465 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.270813 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.366018 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.366090 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.366111 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.366142 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.366160 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.468050 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.468097 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.468111 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.468127 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.468139 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.570096 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.570145 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.570160 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.570177 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.570189 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.622877 5119 apiserver.go:52] "Watching apiserver" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.629132 5119 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="pkg/kubelet/config/apiserver.go:66" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.629807 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-hf5dd","openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6","openshift-network-diagnostics/network-check-target-fhkjl","openshift-etcd/etcd-crc","openshift-image-registry/node-ca-g8ccx","openshift-multus/multus-qxpww","openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv","openshift-dns/node-resolver-7wgxz","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-multus/multus-additional-cni-plugins-w5d5n","openshift-network-node-identity/network-node-identity-dgvkt","openshift-network-operator/iptables-alerter-5jnd7","openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75","openshift-ovn-kubernetes/ovnkube-node-nwvqg","openshift-multus/network-metrics-daemon-8gjq7","openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.630897 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.631460 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.631536 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.632068 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.632196 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.632828 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.633150 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.633985 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.634003 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.634041 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-operator\"/\"metrics-tls\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.635135 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"ovnkube-identity-cm\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.635618 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"openshift-service-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.635891 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.635968 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.637189 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"kube-root-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.637375 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"env-overrides\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.637571 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-node-identity\"/\"network-node-identity-cert\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.639244 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"iptables-alerter-script\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.653338 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.664949 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.672220 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.672267 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.672280 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.672296 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.672308 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.674241 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.682093 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.690410 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.698065 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.699921 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/34177974-8d82-49d2-a763-391d0df3bbd8-host-etc-kube\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.699962 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-m7xz2\" (UniqueName: \"kubernetes.io/projected/34177974-8d82-49d2-a763-391d0df3bbd8-kube-api-access-m7xz2\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.699986 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2nqp\" (UniqueName: \"kubernetes.io/projected/477de9be-7588-4409-8970-8585874094e8-kube-api-access-d2nqp\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700043 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fc4541ce-7789-4670-bc75-5c2868e52ce0-webhook-cert\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700195 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-env-overrides\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700220 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-ovnkube-identity-cm\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700236 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8nt2j\" (UniqueName: \"kubernetes.io/projected/fc4541ce-7789-4670-bc75-5c2868e52ce0-kube-api-access-8nt2j\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700253 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-dsgwk\" (UniqueName: \"kubernetes.io/projected/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-kube-api-access-dsgwk\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700268 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700426 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/34177974-8d82-49d2-a763-391d0df3bbd8-metrics-tls\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700462 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/477de9be-7588-4409-8970-8585874094e8-tmp-dir\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700483 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-host-slash\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700505 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700524 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/477de9be-7588-4409-8970-8585874094e8-hosts-file\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700539 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-iptables-alerter-script\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700557 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.700572 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.700638 5119 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.700691 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.200674032 +0000 UTC m=+88.214736491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.700698 5119 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.700784 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.200754434 +0000 UTC m=+88.214816893 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.708099 5119 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.708410 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-env-overrides\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.707043 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.708594 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-ovnkube-identity-cm\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.709169 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-iptables-alerter-script\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.713941 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/34177974-8d82-49d2-a763-391d0df3bbd8-metrics-tls\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.713961 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fc4541ce-7789-4670-bc75-5c2868e52ce0-webhook-cert\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.714078 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-7wgxz"
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.714353 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.714379 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.714414 5119 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.714513 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.214498935 +0000 UTC m=+88.228561394 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.717001 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"kube-root-ca.crt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.718935 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"node-resolver-dockercfg-tk7bt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.719101 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"openshift-service-ca.crt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.719120 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.719142 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.719155 5119 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.719213 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.219194768 +0000 UTC m=+88.233257227 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.719710 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7xz2\" (UniqueName: \"kubernetes.io/projected/34177974-8d82-49d2-a763-391d0df3bbd8-kube-api-access-m7xz2\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.719781 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsgwk\" (UniqueName: \"kubernetes.io/projected/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-kube-api-access-dsgwk\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.721362 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nt2j\" (UniqueName: \"kubernetes.io/projected/fc4541ce-7789-4670-bc75-5c2868e52ce0-kube-api-access-8nt2j\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.729020 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.735324 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.735487 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-w5d5n"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.737683 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"openshift-service-ca.crt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.737720 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"kube-root-ca.crt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.737771 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"node-ca-dockercfg-tjs74\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.737819 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"whereabouts-flatfile-config\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.737855 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"openshift-service-ca.crt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.738063 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"kube-root-ca.crt\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.738095 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-7wgxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477de9be-7588-4409-8970-8585874094e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d2nqp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7wgxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.738158 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.738301 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-copy-resources\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.738414 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-nwglk\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.739573 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"image-registry-certificates\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.740424 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.742271 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"multus-daemon-config\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.742336 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"default-dockercfg-g6kgg\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.747795 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.756987 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.757699 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.759952 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.760136 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.760512 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-w9nzh\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.760766 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"proxy-tls\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.760881 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.767816 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.774431 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.774467 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.774478 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.774492 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.774506 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.775812 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.776426 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.777767 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-config\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.778131 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-script-lib\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.778357 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"kube-root-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.778540 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-node-metrics-cert\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.778656 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"env-overrides\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.778787 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-node-dockercfg-l2v2m\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.778998 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"openshift-service-ca.crt\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.787035 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.789632 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.789699 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.795273 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.795957 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.795992 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09" Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.796448 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.797522 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-nl8tp\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.797649 5119 reflector.go:430] "Caches populated" type="*v1.Secret" 
reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.800869 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-cnibin\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.800898 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-os-release\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.800971 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-daemon-config\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801017 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-multus-certs\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801051 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f00bde3a-9397-4146-a9c4-22c9093d1608-host\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801069 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801116 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rgpd\" (UniqueName: \"kubernetes.io/projected/0cf99dcb-47cd-4077-9fb1-e39bf209e431-kube-api-access-7rgpd\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801140 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-system-cni-dir\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801162 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-system-cni-dir\") pod \"multus-qxpww\" (UID: 
\"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801187 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-etc-kubernetes\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801214 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-cnibin\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801258 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-cni-binary-copy\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801297 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-k8s-cni-cncf-io\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801319 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-host-slash\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801335 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-socket-dir-parent\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801365 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9jb2\" (UniqueName: \"kubernetes.io/projected/7be2f013-d656-48d9-b332-e66e20efa66f-kube-api-access-d9jb2\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801382 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/477de9be-7588-4409-8970-8585874094e8-hosts-file\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801422 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-cni-bin\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801439 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbv85\" (UniqueName: \"kubernetes.io/projected/f00bde3a-9397-4146-a9c4-22c9093d1608-kube-api-access-nbv85\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801468 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f00bde3a-9397-4146-a9c4-22c9093d1608-serviceca\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801484 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801500 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/34177974-8d82-49d2-a763-391d0df3bbd8-host-etc-kube\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801528 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-d2nqp\" (UniqueName: \"kubernetes.io/projected/477de9be-7588-4409-8970-8585874094e8-kube-api-access-d2nqp\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801552 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-conf-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801585 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-os-release\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801635 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-netns\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801650 5119 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-kubelet\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801666 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-whereabouts-flatfile-configmap\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801684 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/477de9be-7588-4409-8970-8585874094e8-tmp-dir\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801702 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0cf99dcb-47cd-4077-9fb1-e39bf209e431-cni-binary-copy\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801721 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-hostroot\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801742 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-cni-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801760 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-cni-multus\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801828 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/34177974-8d82-49d2-a763-391d0df3bbd8-host-etc-kube\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.801846 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-host-slash\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.802013 5119 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/477de9be-7588-4409-8970-8585874094e8-hosts-file\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.803005 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/477de9be-7588-4409-8970-8585874094e8-tmp-dir\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.804208 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-qxpww" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf99dcb-47cd-4077-9fb1-e39bf209e431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rgpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qxpww\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.816463 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7337c888-01aa-4a6b-b494-7a51eff39634\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b
21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-nwvqg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.817704 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2nqp\" (UniqueName: \"kubernetes.io/projected/477de9be-7588-4409-8970-8585874094e8-kube-api-access-d2nqp\") pod \"node-resolver-7wgxz\" (UID: \"477de9be-7588-4409-8970-8585874094e8\") " pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.825372 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.832105 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.837692 5119 desired_state_of_world_populator.go:158] "Finished populating initial desired state of world" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.838279 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff39619-cf4b-4c00-8d99-71c924fcf4c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hf5dd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.847754 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.863082 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-7wgxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477de9be-7588-4409-8970-8585874094e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d2nqp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7wgxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.869124 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g8ccx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f00bde3a-9397-4146-a9c4-22c9093d1608\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nbv85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g8ccx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.876855 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.876892 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.876901 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.876914 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.876923 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.880478 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7be2f013-d656-48d9-b332-e66e20efa66f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-w5d5n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.888752 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903098 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls\") pod \"6077b63e-53a2-4f96-9d56-1ce0324e4913\" (UID: \"6077b63e-53a2-4f96-9d56-1ce0324e4913\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903123 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903155 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903452 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903495 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wj4qr\" (UniqueName: \"kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr\") pod \"149b3c48-e17c-4a66-a835-d86dabf6ff13\" (UID: \"149b3c48-e17c-4a66-a835-d86dabf6ff13\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903525 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content\") pod \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\" (UID: \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903533 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903547 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903573 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903595 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddlk9\" (UniqueName: \"kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903618 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjwtd\" (UniqueName: \"kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903640 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903665 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903688 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903710 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903731 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903753 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftwb6\" (UniqueName: 
\"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903774 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content\") pod \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\" (UID: \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903794 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903815 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir\") pod \"6077b63e-53a2-4f96-9d56-1ce0324e4913\" (UID: \"6077b63e-53a2-4f96-9d56-1ce0324e4913\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903837 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903836 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "584e1f4a-8205-47d7-8efb-3afc6017c4c9" (UID: "584e1f4a-8205-47d7-8efb-3afc6017c4c9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903858 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l87hs\" (UniqueName: \"kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs\") pod \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\" (UID: \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903887 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903920 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903954 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnxbn\" (UniqueName: \"kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn\") pod \"ce090a97-9ab6-4c40-a719-64ff2acd9778\" (UID: \"ce090a97-9ab6-4c40-a719-64ff2acd9778\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903977 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config\") pod \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\" (UID: \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.903999 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904020 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904042 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hb7m\" (UniqueName: \"kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m\") pod \"94a6e063-3d1a-4d44-875d-185291448c31\" (UID: \"94a6e063-3d1a-4d44-875d-185291448c31\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904071 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twvbl\" (UniqueName: \"kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl\") pod \"b4750666-1362-4001-abd0-6f89964cc621\" (UID: \"b4750666-1362-4001-abd0-6f89964cc621\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904091 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904114 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904138 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqbfk\" (UniqueName: \"kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk\") pod 
\"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904160 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904183 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904206 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904232 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config\") pod \"b4750666-1362-4001-abd0-6f89964cc621\" (UID: \"b4750666-1362-4001-abd0-6f89964cc621\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904255 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities\") pod \"94a6e063-3d1a-4d44-875d-185291448c31\" (UID: \"94a6e063-3d1a-4d44-875d-185291448c31\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904265 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs" (OuterVolumeSpecName: "kube-api-access-l87hs") pod "5ebfebf6-3ecd-458e-943f-bb25b52e2718" (UID: "5ebfebf6-3ecd-458e-943f-bb25b52e2718"). InnerVolumeSpecName "kube-api-access-l87hs". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904276 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904340 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99zj9\" (UniqueName: \"kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904379 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904416 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904440 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jjkz\" (UniqueName: \"kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904463 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token\") pod \"593a3561-7760-45c5-8f91-5aaef7475d0f\" (UID: \"593a3561-7760-45c5-8f91-5aaef7475d0f\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904488 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir\") pod \"f7e2c886-118e-43bb-bef1-c78134de392b\" (UID: \"f7e2c886-118e-43bb-bef1-c78134de392b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904511 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904585 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert\") pod \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\" (UID: \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904610 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls\") pod \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\" (UID: 
\"6ee8fbd3-1f81-4666-96da-5afc70819f1a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904635 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfp5s\" (UniqueName: \"kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s\") pod \"cc85e424-18b2-4924-920b-bd291a8c4b01\" (UID: \"cc85e424-18b2-4924-920b-bd291a8c4b01\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904657 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904681 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904703 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904728 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access\") pod \"d7e8f42f-dc0e-424b-bb56-5ec849834888\" (UID: \"d7e8f42f-dc0e-424b-bb56-5ec849834888\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904764 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904786 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dztfv\" (UniqueName: \"kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904809 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rmnv\" (UniqueName: \"kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv\") pod \"b605f283-6f2e-42da-a838-54421690f7d0\" (UID: \"b605f283-6f2e-42da-a838-54421690f7d0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904832 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904862 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: 
\"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904886 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates\") pod \"16bdd140-dce1-464c-ab47-dd5798d1d256\" (UID: \"16bdd140-dce1-464c-ab47-dd5798d1d256\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904909 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vsz9\" (UniqueName: \"kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904931 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904932 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904955 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904958 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.904980 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905055 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905203 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert\") pod \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\" (UID: \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905348 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9" (OuterVolumeSpecName: "kube-api-access-99zj9") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "kube-api-access-99zj9". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905456 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905473 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd" (OuterVolumeSpecName: "kube-api-access-mjwtd") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "kube-api-access-mjwtd". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905483 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nspp\" (UniqueName: \"kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905531 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905562 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905571 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "6ee8fbd3-1f81-4666-96da-5afc70819f1a" (UID: "6ee8fbd3-1f81-4666-96da-5afc70819f1a"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905576 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9" (OuterVolumeSpecName: "kube-api-access-ddlk9") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "kube-api-access-ddlk9". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905591 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905623 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert\") pod \"16bdd140-dce1-464c-ab47-dd5798d1d256\" (UID: \"16bdd140-dce1-464c-ab47-dd5798d1d256\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905687 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content\") pod \"b605f283-6f2e-42da-a838-54421690f7d0\" (UID: \"b605f283-6f2e-42da-a838-54421690f7d0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905714 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905722 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn" (OuterVolumeSpecName: "kube-api-access-xnxbn") pod "ce090a97-9ab6-4c40-a719-64ff2acd9778" (UID: "ce090a97-9ab6-4c40-a719-64ff2acd9778"). InnerVolumeSpecName "kube-api-access-xnxbn". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905736 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca\") pod \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\" (UID: \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905892 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26xrl\" (UniqueName: \"kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl\") pod \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\" (UID: \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905940 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905959 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905977 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert\") pod \"d7e8f42f-dc0e-424b-bb56-5ec849834888\" (UID: \"d7e8f42f-dc0e-424b-bb56-5ec849834888\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.905994 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906011 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle\") pod \"ce090a97-9ab6-4c40-a719-64ff2acd9778\" (UID: \"ce090a97-9ab6-4c40-a719-64ff2acd9778\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906032 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906054 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities\") pod \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\" (UID: \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906086 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: 
\"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906112 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dmhf\" (UniqueName: \"kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906132 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs\") pod \"593a3561-7760-45c5-8f91-5aaef7475d0f\" (UID: \"593a3561-7760-45c5-8f91-5aaef7475d0f\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906155 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls\") pod \"42a11a02-47e1-488f-b270-2679d3298b0e\" (UID: \"42a11a02-47e1-488f-b270-2679d3298b0e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906180 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca\") pod \"d7e8f42f-dc0e-424b-bb56-5ec849834888\" (UID: \"d7e8f42f-dc0e-424b-bb56-5ec849834888\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906205 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906230 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906250 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906266 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5rsr\" (UniqueName: \"kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906282 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906300 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: 
\"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906319 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906337 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lcfw\" (UniqueName: \"kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906349 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config" (OuterVolumeSpecName: "config") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906356 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zth6t\" (UniqueName: \"kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t\") pod \"6077b63e-53a2-4f96-9d56-1ce0324e4913\" (UID: \"6077b63e-53a2-4f96-9d56-1ce0324e4913\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906466 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zg8nc\" (UniqueName: \"kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906498 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w94wk\" (UniqueName: \"kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk\") pod \"01080b46-74f1-4191-8755-5152a57b3b25\" (UID: \"01080b46-74f1-4191-8755-5152a57b3b25\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906518 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp" (OuterVolumeSpecName: "tmp") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906524 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pllx6\" (UniqueName: \"kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6\") pod \"81e39f7b-62e4-4fc9-992a-6535ce127a02\" (UID: \"81e39f7b-62e4-4fc9-992a-6535ce127a02\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906542 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp" (OuterVolumeSpecName: "tmp") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906565 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsb9b\" (UniqueName: \"kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b\") pod \"09cfa50b-4138-4585-a53e-64dd3ab73335\" (UID: \"09cfa50b-4138-4585-a53e-64dd3ab73335\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906594 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities\") pod \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\" (UID: \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906623 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906640 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906657 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tknt7\" (UniqueName: \"kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7\") pod \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\" (UID: \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906788 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities\") pod \"149b3c48-e17c-4a66-a835-d86dabf6ff13\" (UID: \"149b3c48-e17c-4a66-a835-d86dabf6ff13\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906778 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s" (OuterVolumeSpecName: "kube-api-access-xfp5s") pod "cc85e424-18b2-4924-920b-bd291a8c4b01" (UID: "cc85e424-18b2-4924-920b-bd291a8c4b01"). InnerVolumeSpecName "kube-api-access-xfp5s". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906812 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906798 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6" (OuterVolumeSpecName: "kube-api-access-pllx6") pod "81e39f7b-62e4-4fc9-992a-6535ce127a02" (UID: "81e39f7b-62e4-4fc9-992a-6535ce127a02"). InnerVolumeSpecName "kube-api-access-pllx6". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906833 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906853 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94l9h\" (UniqueName: \"kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h\") pod \"16bdd140-dce1-464c-ab47-dd5798d1d256\" (UID: \"16bdd140-dce1-464c-ab47-dd5798d1d256\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906876 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906895 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbc2l\" (UniqueName: \"kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l\") pod \"593a3561-7760-45c5-8f91-5aaef7475d0f\" (UID: \"593a3561-7760-45c5-8f91-5aaef7475d0f\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906899 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906912 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.906976 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907012 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content\") pod \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\" (UID: \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907314 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907331 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907343 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert\") pod \"7599e0b6-bddf-4def-b7f2-0b32206e8651\" (UID: \"7599e0b6-bddf-4def-b7f2-0b32206e8651\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907348 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp" (OuterVolumeSpecName: "kube-api-access-8nspp") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "kube-api-access-8nspp". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907409 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content\") pod \"149b3c48-e17c-4a66-a835-d86dabf6ff13\" (UID: \"149b3c48-e17c-4a66-a835-d86dabf6ff13\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907516 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5lgh\" (UniqueName: \"kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907532 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t" (OuterVolumeSpecName: "kube-api-access-zth6t") pod "6077b63e-53a2-4f96-9d56-1ce0324e4913" (UID: "6077b63e-53a2-4f96-9d56-1ce0324e4913"). InnerVolumeSpecName "kube-api-access-zth6t". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907549 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907610 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "6077b63e-53a2-4f96-9d56-1ce0324e4913" (UID: "6077b63e-53a2-4f96-9d56-1ce0324e4913"). InnerVolumeSpecName "tmp-dir". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907623 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content\") pod \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\" (UID: \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907653 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907679 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907677 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "a208c9c2-333b-4b4a-be0d-bc32ec38a821" (UID: "a208c9c2-333b-4b4a-be0d-bc32ec38a821"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907702 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs\") pod \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\" (UID: \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907727 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config" (OuterVolumeSpecName: "config") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907782 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907808 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities\") pod \"cc85e424-18b2-4924-920b-bd291a8c4b01\" (UID: \"cc85e424-18b2-4924-920b-bd291a8c4b01\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907831 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907852 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907877 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key\") pod \"ce090a97-9ab6-4c40-a719-64ff2acd9778\" (UID: \"ce090a97-9ab6-4c40-a719-64ff2acd9778\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907906 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907906 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk" (OuterVolumeSpecName: "kube-api-access-qqbfk") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "kube-api-access-qqbfk". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907931 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities\") pod \"b605f283-6f2e-42da-a838-54421690f7d0\" (UID: \"b605f283-6f2e-42da-a838-54421690f7d0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907955 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.907980 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908002 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content\") pod \"cc85e424-18b2-4924-920b-bd291a8c4b01\" (UID: \"cc85e424-18b2-4924-920b-bd291a8c4b01\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908025 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908048 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908073 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptkcf\" (UniqueName: \"kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf\") pod \"7599e0b6-bddf-4def-b7f2-0b32206e8651\" (UID: \"7599e0b6-bddf-4def-b7f2-0b32206e8651\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908097 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities\") pod \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\" (UID: \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908126 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908178 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4smf\" (UniqueName: \"kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf\") 
pod \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\" (UID: \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908198 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908218 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908236 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908253 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws8zz\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908274 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m26jq\" (UniqueName: \"kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908295 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config\") pod \"01080b46-74f1-4191-8755-5152a57b3b25\" (UID: \"01080b46-74f1-4191-8755-5152a57b3b25\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908527 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config" (OuterVolumeSpecName: "config") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908545 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908676 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908680 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m" (OuterVolumeSpecName: "kube-api-access-4hb7m") pod "94a6e063-3d1a-4d44-875d-185291448c31" (UID: "94a6e063-3d1a-4d44-875d-185291448c31"). InnerVolumeSpecName "kube-api-access-4hb7m". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908690 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908690 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908810 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config" (OuterVolumeSpecName: "config") pod "01080b46-74f1-4191-8755-5152a57b3b25" (UID: "01080b46-74f1-4191-8755-5152a57b3b25"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.908883 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909238 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909291 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909354 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909367 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pskd\" (UniqueName: \"kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909473 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909489 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "e1d2a42d-af1d-4054-9618-ab545e0ed8b7" (UID: "e1d2a42d-af1d-4054-9618-ab545e0ed8b7"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909506 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6" (OuterVolumeSpecName: "kube-api-access-ftwb6") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "kube-api-access-ftwb6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909825 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909826 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "6077b63e-53a2-4f96-9d56-1ce0324e4913" (UID: "6077b63e-53a2-4f96-9d56-1ce0324e4913"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909888 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909888 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl" (OuterVolumeSpecName: "kube-api-access-twvbl") pod "b4750666-1362-4001-abd0-6f89964cc621" (UID: "b4750666-1362-4001-abd0-6f89964cc621"). InnerVolumeSpecName "kube-api-access-twvbl". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909919 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909925 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp" (OuterVolumeSpecName: "tmp") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909944 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls\") pod \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\" (UID: \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909970 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.909998 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzt4w\" (UniqueName: \"kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w\") pod \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\" (UID: \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910015 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz" (OuterVolumeSpecName: "kube-api-access-7jjkz") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "kube-api-access-7jjkz". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910024 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910041 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "b4750666-1362-4001-abd0-6f89964cc621" (UID: "b4750666-1362-4001-abd0-6f89964cc621"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910101 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910161 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910194 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910223 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910248 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910311 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config\") pod \"81e39f7b-62e4-4fc9-992a-6535ce127a02\" (UID: \"81e39f7b-62e4-4fc9-992a-6535ce127a02\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910337 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910361 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910406 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4tqq\" (UniqueName: \"kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq\") pod \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\" (UID: \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910436 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: 
\"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910464 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nb9c\" (UniqueName: \"kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910495 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910522 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgrkj\" (UniqueName: \"kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj\") pod \"42a11a02-47e1-488f-b270-2679d3298b0e\" (UID: \"42a11a02-47e1-488f-b270-2679d3298b0e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910550 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910579 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hckvg\" (UniqueName: \"kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910605 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910628 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910657 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910688 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert\") pod \"09cfa50b-4138-4585-a53e-64dd3ab73335\" (UID: \"09cfa50b-4138-4585-a53e-64dd3ab73335\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910716 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: 
\"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910771 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content\") pod \"94a6e063-3d1a-4d44-875d-185291448c31\" (UID: \"94a6e063-3d1a-4d44-875d-185291448c31\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910799 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910826 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910865 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities\") pod \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\" (UID: \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910899 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbmqg\" (UniqueName: \"kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910900 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc" (OuterVolumeSpecName: "kube-api-access-zg8nc") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "kube-api-access-zg8nc". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910881 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910920 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw" (OuterVolumeSpecName: "kube-api-access-5lcfw") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "kube-api-access-5lcfw". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910930 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.910998 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7cps\" (UniqueName: \"kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps\") pod \"af41de71-79cf-4590-bbe9-9e8b848862cb\" (UID: \"af41de71-79cf-4590-bbe9-9e8b848862cb\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911075 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911104 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911140 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkdh6\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911168 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9stx\" (UniqueName: \"kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911208 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911235 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911238 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk" (OuterVolumeSpecName: "kube-api-access-w94wk") pod "01080b46-74f1-4191-8755-5152a57b3b25" (UID: "01080b46-74f1-4191-8755-5152a57b3b25"). InnerVolumeSpecName "kube-api-access-w94wk". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911251 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911269 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g4lr\" (UniqueName: \"kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr\") pod \"f7e2c886-118e-43bb-bef1-c78134de392b\" (UID: \"f7e2c886-118e-43bb-bef1-c78134de392b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911300 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911321 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911329 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911375 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911426 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911456 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911524 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config\") pod \"09cfa50b-4138-4585-a53e-64dd3ab73335\" (UID: \"09cfa50b-4138-4585-a53e-64dd3ab73335\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911774 5119 operation_generator.go:781] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911814 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911890 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911926 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911954 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.911979 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912016 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd" (OuterVolumeSpecName: "kube-api-access-8pskd") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "kube-api-access-8pskd". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912042 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912070 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities" (OuterVolumeSpecName: "utilities") pod "94a6e063-3d1a-4d44-875d-185291448c31" (UID: "94a6e063-3d1a-4d44-875d-185291448c31"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912073 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pddnv\" (UniqueName: \"kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv\") pod \"e093be35-bb62-4843-b2e8-094545761610\" (UID: \"e093be35-bb62-4843-b2e8-094545761610\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912129 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config\") pod \"7599e0b6-bddf-4def-b7f2-0b32206e8651\" (UID: \"7599e0b6-bddf-4def-b7f2-0b32206e8651\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912153 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912161 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912184 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912210 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912245 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxfcv\" (UniqueName: \"kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv\") pod \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\" (UID: \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912329 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912338 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config" (OuterVolumeSpecName: "config") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912360 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls\") pod \"b4750666-1362-4001-abd0-6f89964cc621\" (UID: \"b4750666-1362-4001-abd0-6f89964cc621\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912411 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912446 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912474 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912505 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9z4sw\" (UniqueName: \"kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw\") pod \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\" (UID: \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912533 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgx6b\" (UniqueName: \"kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b\") pod \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\" (UID: \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912562 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912565 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d7e8f42f-dc0e-424b-bb56-5ec849834888" (UID: "d7e8f42f-dc0e-424b-bb56-5ec849834888"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912591 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912627 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grwfz\" (UniqueName: \"kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz\") pod \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\" (UID: \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915215 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy\") pod \"81e39f7b-62e4-4fc9-992a-6535ce127a02\" (UID: \"81e39f7b-62e4-4fc9-992a-6535ce127a02\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915250 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915274 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915294 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert\") pod \"01080b46-74f1-4191-8755-5152a57b3b25\" (UID: \"01080b46-74f1-4191-8755-5152a57b3b25\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915318 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs\") pod \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\" (UID: \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915342 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915378 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915431 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ks6v2\" (UniqueName: \"kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2\") pod \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\" (UID: 
\"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915451 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915471 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfzkj\" (UniqueName: \"kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj\") pod \"0effdbcf-dd7d-404d-9d48-77536d665a5d\" (UID: \"0effdbcf-dd7d-404d-9d48-77536d665a5d\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915500 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915518 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915543 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g8ts\" (UniqueName: \"kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915565 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmmzf\" (UniqueName: \"kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915589 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915609 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm9x7\" (UniqueName: \"kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922034 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cff142ed-dc20-4c3a-b157-dbc3d3cdd9af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"},\\\"containerID\\\":\\\"cri-o://e3beffa08faf81da0394c450397815931c72fd49f42fd6218edbaac85fe6528f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://67b9fad7cb88a39c44754f831ea6adeeea20e24f7bf2131cfd0d76dde042924c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b9aba26083b8cd47e9a89c003b6fec66d485c32c4e80234a1f3e95d56d86e185\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1e870376ee035d24f0f3af0d51bbb91b9860fd137c3f88364c555717aca89a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://1abdada27c369710bf3ba52e3f0e584cecec6ad6f6f11b5757c0d0748d7ae54c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c43657b4b20e341dcfedff314e3d3465e7cfe7b
bf41accc0e0e85f10f9c7b5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c43657b4b20e341dcfedff314e3d3465e7cfe7bbf41accc0e0e85f10f9c7b5a4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd-auto-backup\\\",\\\"name\\\":\\\"etcd-auto-backup-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:10:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:10:00Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912591 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912876 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv" (OuterVolumeSpecName: "kube-api-access-pddnv") pod "e093be35-bb62-4843-b2e8-094545761610" (UID: "e093be35-bb62-4843-b2e8-094545761610"). InnerVolumeSpecName "kube-api-access-pddnv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.912954 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf" (OuterVolumeSpecName: "kube-api-access-6dmhf") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "kube-api-access-6dmhf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913026 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913115 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config" (OuterVolumeSpecName: "config") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913136 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume" (OuterVolumeSpecName: "config-volume") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913173 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "16bdd140-dce1-464c-ab47-dd5798d1d256" (UID: "16bdd140-dce1-464c-ab47-dd5798d1d256"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913222 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config" (OuterVolumeSpecName: "config") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925207 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913520 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913521 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h" (OuterVolumeSpecName: "kube-api-access-94l9h") pod "16bdd140-dce1-464c-ab47-dd5798d1d256" (UID: "16bdd140-dce1-464c-ab47-dd5798d1d256"). InnerVolumeSpecName "kube-api-access-94l9h". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913606 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7599e0b6-bddf-4def-b7f2-0b32206e8651" (UID: "7599e0b6-bddf-4def-b7f2-0b32206e8651"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913740 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities" (OuterVolumeSpecName: "utilities") pod "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" (UID: "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913760 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" (UID: "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913805 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.913953 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914069 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914100 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914177 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv" (OuterVolumeSpecName: "kube-api-access-dztfv") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "kube-api-access-dztfv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914262 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914324 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7" (OuterVolumeSpecName: "kube-api-access-tknt7") pod "584e1f4a-8205-47d7-8efb-3afc6017c4c9" (UID: "584e1f4a-8205-47d7-8efb-3afc6017c4c9"). InnerVolumeSpecName "kube-api-access-tknt7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914621 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914710 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities" (OuterVolumeSpecName: "utilities") pod "31fa8943-81cc-4750-a0b7-0fa9ab5af883" (UID: "31fa8943-81cc-4750-a0b7-0fa9ab5af883"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914947 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.914993 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv" (OuterVolumeSpecName: "kube-api-access-6rmnv") pod "b605f283-6f2e-42da-a838-54421690f7d0" (UID: "b605f283-6f2e-42da-a838-54421690f7d0"). InnerVolumeSpecName "kube-api-access-6rmnv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915048 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b605f283-6f2e-42da-a838-54421690f7d0" (UID: "b605f283-6f2e-42da-a838-54421690f7d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915066 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz" (OuterVolumeSpecName: "kube-api-access-grwfz") pod "31fa8943-81cc-4750-a0b7-0fa9ab5af883" (UID: "31fa8943-81cc-4750-a0b7-0fa9ab5af883"). InnerVolumeSpecName "kube-api-access-grwfz". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915095 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "e1d2a42d-af1d-4054-9618-ab545e0ed8b7" (UID: "e1d2a42d-af1d-4054-9618-ab545e0ed8b7"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915119 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config" (OuterVolumeSpecName: "config") pod "7599e0b6-bddf-4def-b7f2-0b32206e8651" (UID: "7599e0b6-bddf-4def-b7f2-0b32206e8651"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915337 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: E0130 00:11:23.915644 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:24.415623628 +0000 UTC m=+88.429686087 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915645 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "f7e2c886-118e-43bb-bef1-c78134de392b" (UID: "f7e2c886-118e-43bb-bef1-c78134de392b"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915666 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem" (OuterVolumeSpecName: "ca-trust-extracted-pem") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "ca-trust-extracted-pem". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915687 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915695 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "593a3561-7760-45c5-8f91-5aaef7475d0f" (UID: "593a3561-7760-45c5-8f91-5aaef7475d0f"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915716 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915877 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915888 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl" (OuterVolumeSpecName: "kube-api-access-26xrl") pod "a208c9c2-333b-4b4a-be0d-bc32ec38a821" (UID: "a208c9c2-333b-4b4a-be0d-bc32ec38a821"). InnerVolumeSpecName "kube-api-access-26xrl". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915720 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh" (OuterVolumeSpecName: "kube-api-access-m5lgh") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "kube-api-access-m5lgh". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.915918 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7" (OuterVolumeSpecName: "kube-api-access-hm9x7") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "kube-api-access-hm9x7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916080 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs" (OuterVolumeSpecName: "certs") pod "593a3561-7760-45c5-8f91-5aaef7475d0f" (UID: "593a3561-7760-45c5-8f91-5aaef7475d0f"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916110 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps" (OuterVolumeSpecName: "kube-api-access-d7cps") pod "af41de71-79cf-4590-bbe9-9e8b848862cb" (UID: "af41de71-79cf-4590-bbe9-9e8b848862cb"). InnerVolumeSpecName "kube-api-access-d7cps". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916270 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l" (OuterVolumeSpecName: "kube-api-access-sbc2l") pod "593a3561-7760-45c5-8f91-5aaef7475d0f" (UID: "593a3561-7760-45c5-8f91-5aaef7475d0f"). InnerVolumeSpecName "kube-api-access-sbc2l". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916345 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916380 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities" (OuterVolumeSpecName: "utilities") pod "cc85e424-18b2-4924-920b-bd291a8c4b01" (UID: "cc85e424-18b2-4924-920b-bd291a8c4b01"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916456 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916501 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config" (OuterVolumeSpecName: "config") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916614 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916610 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config" (OuterVolumeSpecName: "config") pod "09cfa50b-4138-4585-a53e-64dd3ab73335" (UID: "09cfa50b-4138-4585-a53e-64dd3ab73335"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.916657 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "81e39f7b-62e4-4fc9-992a-6535ce127a02" (UID: "81e39f7b-62e4-4fc9-992a-6535ce127a02"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917120 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca" (OuterVolumeSpecName: "serviceca") pod "5ebfebf6-3ecd-458e-943f-bb25b52e2718" (UID: "5ebfebf6-3ecd-458e-943f-bb25b52e2718"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917359 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w" (OuterVolumeSpecName: "kube-api-access-rzt4w") pod "a52afe44-fb37-46ed-a1f8-bf39727a3cbe" (UID: "a52afe44-fb37-46ed-a1f8-bf39727a3cbe"). InnerVolumeSpecName "kube-api-access-rzt4w". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917364 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b" (OuterVolumeSpecName: "kube-api-access-zsb9b") pod "09cfa50b-4138-4585-a53e-64dd3ab73335" (UID: "09cfa50b-4138-4585-a53e-64dd3ab73335"). 
InnerVolumeSpecName "kube-api-access-zsb9b". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917619 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images" (OuterVolumeSpecName: "images") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917675 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "42a11a02-47e1-488f-b270-2679d3298b0e" (UID: "42a11a02-47e1-488f-b270-2679d3298b0e"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917746 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.917824 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "0dd0fbac-8c0d-4228-8faa-abbeedabf7db" (UID: "0dd0fbac-8c0d-4228-8faa-abbeedabf7db"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.918050 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.918068 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.918879 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.919140 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.919160 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9" (OuterVolumeSpecName: "kube-api-access-9vsz9") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "kube-api-access-9vsz9". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.919170 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr" (OuterVolumeSpecName: "kube-api-access-wj4qr") pod "149b3c48-e17c-4a66-a835-d86dabf6ff13" (UID: "149b3c48-e17c-4a66-a835-d86dabf6ff13"). InnerVolumeSpecName "kube-api-access-wj4qr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.919184 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.919725 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert" (OuterVolumeSpecName: "cert") pod "a52afe44-fb37-46ed-a1f8-bf39727a3cbe" (UID: "a52afe44-fb37-46ed-a1f8-bf39727a3cbe"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.919747 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj" (OuterVolumeSpecName: "kube-api-access-mfzkj") pod "0effdbcf-dd7d-404d-9d48-77536d665a5d" (UID: "0effdbcf-dd7d-404d-9d48-77536d665a5d"). InnerVolumeSpecName "kube-api-access-mfzkj". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920067 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920249 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). 
InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920250 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920417 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp" (OuterVolumeSpecName: "tmp") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920434 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6" (OuterVolumeSpecName: "kube-api-access-tkdh6") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "kube-api-access-tkdh6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920651 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920653 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf" (OuterVolumeSpecName: "kube-api-access-q4smf") pod "0dd0fbac-8c0d-4228-8faa-abbeedabf7db" (UID: "0dd0fbac-8c0d-4228-8faa-abbeedabf7db"). InnerVolumeSpecName "kube-api-access-q4smf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920668 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920743 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920760 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.920785 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01080b46-74f1-4191-8755-5152a57b3b25" (UID: "01080b46-74f1-4191-8755-5152a57b3b25"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.921049 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.921434 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca" (OuterVolumeSpecName: "client-ca") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.921565 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.921637 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key" (OuterVolumeSpecName: "signing-key") pod "ce090a97-9ab6-4c40-a719-64ff2acd9778" (UID: "ce090a97-9ab6-4c40-a719-64ff2acd9778"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.921703 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca" (OuterVolumeSpecName: "service-ca") pod "d7e8f42f-dc0e-424b-bb56-5ec849834888" (UID: "d7e8f42f-dc0e-424b-bb56-5ec849834888"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.921762 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz" (OuterVolumeSpecName: "kube-api-access-ws8zz") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "kube-api-access-ws8zz". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922175 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922190 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922253 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "16bdd140-dce1-464c-ab47-dd5798d1d256" (UID: "16bdd140-dce1-464c-ab47-dd5798d1d256"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922292 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "81e39f7b-62e4-4fc9-992a-6535ce127a02" (UID: "81e39f7b-62e4-4fc9-992a-6535ce127a02"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922318 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr" (OuterVolumeSpecName: "kube-api-access-z5rsr") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "kube-api-access-z5rsr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922342 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922371 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images" (OuterVolumeSpecName: "images") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922634 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09cfa50b-4138-4585-a53e-64dd3ab73335" (UID: "09cfa50b-4138-4585-a53e-64dd3ab73335"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.922811 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx" (OuterVolumeSpecName: "kube-api-access-l9stx") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "kube-api-access-l9stx". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.923073 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf" (OuterVolumeSpecName: "kube-api-access-nmmzf") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "kube-api-access-nmmzf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.923134 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq" (OuterVolumeSpecName: "kube-api-access-m26jq") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "kube-api-access-m26jq". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.923150 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr" (OuterVolumeSpecName: "kube-api-access-6g4lr") pod "f7e2c886-118e-43bb-bef1-c78134de392b" (UID: "f7e2c886-118e-43bb-bef1-c78134de392b"). InnerVolumeSpecName "kube-api-access-6g4lr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.923219 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.923283 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.923615 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.924280 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.924421 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.924583 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.924600 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.924794 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca" (OuterVolumeSpecName: "client-ca") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.924985 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" (UID: "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925022 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925050 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925078 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv" (OuterVolumeSpecName: "kube-api-access-xxfcv") pod "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" (UID: "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff"). InnerVolumeSpecName "kube-api-access-xxfcv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925104 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp" (OuterVolumeSpecName: "tmp") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925250 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit" (OuterVolumeSpecName: "audit") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925402 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925511 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap" (OuterVolumeSpecName: "whereabouts-flatfile-configmap") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "whereabouts-flatfile-configmap". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925516 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca" (OuterVolumeSpecName: "service-ca") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.925953 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts" (OuterVolumeSpecName: "kube-api-access-4g8ts") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "kube-api-access-4g8ts". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926378 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-k8s-cni-cncf-io\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926432 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-netd\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926452 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926475 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-socket-dir-parent\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926491 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-etc-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926508 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-node-log\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926532 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-config\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926573 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-d9jb2\" (UniqueName: \"kubernetes.io/projected/7be2f013-d656-48d9-b332-e66e20efa66f-kube-api-access-d9jb2\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926597 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-ovn\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926620 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/dff39619-cf4b-4c00-8d99-71c924fcf4c2-rootfs\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926648 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-cni-bin\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926670 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nbv85\" (UniqueName: \"kubernetes.io/projected/f00bde3a-9397-4146-a9c4-22c9093d1608-kube-api-access-nbv85\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926695 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926717 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dff39619-cf4b-4c00-8d99-71c924fcf4c2-proxy-tls\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926760 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f00bde3a-9397-4146-a9c4-22c9093d1608-serviceca\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926778 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config" (OuterVolumeSpecName: "console-config") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926787 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926839 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926845 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-log-socket\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.926936 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config" (OuterVolumeSpecName: "config") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927116 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927192 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc85e424-18b2-4924-920b-bd291a8c4b01" (UID: "cc85e424-18b2-4924-920b-bd291a8c4b01"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927431 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-k8s-cni-cncf-io\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927491 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-socket-dir-parent\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927722 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf" (OuterVolumeSpecName: "kube-api-access-ptkcf") pod "7599e0b6-bddf-4def-b7f2-0b32206e8651" (UID: "7599e0b6-bddf-4def-b7f2-0b32206e8651"). InnerVolumeSpecName "kube-api-access-ptkcf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927730 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.927791 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-cni-bin\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928449 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg" (OuterVolumeSpecName: "kube-api-access-hckvg") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "kube-api-access-hckvg". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928499 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-kubelet\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928517 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928541 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-conf-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928567 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-os-release\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928583 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-netns\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928602 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-kubelet\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928619 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-whereabouts-flatfile-configmap\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928637 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7337c888-01aa-4a6b-b494-7a51eff39634-ovn-node-metrics-cert\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928654 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-env-overrides\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" 
(UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928674 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qznm\" (UniqueName: \"kubernetes.io/projected/976aa95a-addb-4ae4-9ec8-efd0863c66af-kube-api-access-4qznm\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928690 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k6t4\" (UniqueName: \"kubernetes.io/projected/dff39619-cf4b-4c00-8d99-71c924fcf4c2-kube-api-access-5k6t4\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928708 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0cf99dcb-47cd-4077-9fb1-e39bf209e431-cni-binary-copy\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928723 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-hostroot\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928730 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f00bde3a-9397-4146-a9c4-22c9093d1608-serviceca\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928742 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-netns\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928764 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dff39619-cf4b-4c00-8d99-71c924fcf4c2-mcd-auth-proxy-config\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928753 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d7e8f42f-dc0e-424b-bb56-5ec849834888" (UID: "d7e8f42f-dc0e-424b-bb56-5ec849834888"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928788 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-cni-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.928817 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-cni-multus\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929014 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities" (OuterVolumeSpecName: "utilities") pod "584e1f4a-8205-47d7-8efb-3afc6017c4c9" (UID: "584e1f4a-8205-47d7-8efb-3afc6017c4c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929054 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-hostroot\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929100 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities" (OuterVolumeSpecName: "utilities") pod "149b3c48-e17c-4a66-a835-d86dabf6ff13" (UID: "149b3c48-e17c-4a66-a835-d86dabf6ff13"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929460 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b" (OuterVolumeSpecName: "kube-api-access-pgx6b") pod "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" (UID: "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4"). InnerVolumeSpecName "kube-api-access-pgx6b". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929469 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-cni-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929495 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-cni-multus\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929692 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4qqn\" (UniqueName: \"kubernetes.io/projected/7337c888-01aa-4a6b-b494-7a51eff39634-kube-api-access-r4qqn\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929739 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-cnibin\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929764 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-os-release\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929784 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-daemon-config\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929784 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-whereabouts-flatfile-configmap\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929806 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-multus-certs\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929824 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-var-lib-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 
00:11:23.929886 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-env-overrides\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929958 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0cf99dcb-47cd-4077-9fb1-e39bf209e431-cni-binary-copy\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.929977 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-var-lib-kubelet\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930025 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj" (OuterVolumeSpecName: "kube-api-access-qgrkj") pod "42a11a02-47e1-488f-b270-2679d3298b0e" (UID: "42a11a02-47e1-488f-b270-2679d3298b0e"). InnerVolumeSpecName "kube-api-access-qgrkj". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930050 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-cnibin\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930105 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-conf-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930235 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f00bde3a-9397-4146-a9c4-22c9093d1608-host\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930258 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930279 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-systemd-units\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930312 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-slash\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930335 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-ovn-kubernetes\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930375 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7rgpd\" (UniqueName: \"kubernetes.io/projected/0cf99dcb-47cd-4077-9fb1-e39bf209e431-kube-api-access-7rgpd\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930417 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-system-cni-dir\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930467 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-script-lib\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930482 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f00bde3a-9397-4146-a9c4-22c9093d1608-host\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930494 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-system-cni-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930540 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-system-cni-dir\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930560 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-etc-kubernetes\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930771 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-tuning-conf-dir\") 
pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930569 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-system-cni-dir\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930807 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-multus-certs\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930777 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0cf99dcb-47cd-4077-9fb1-e39bf209e431-multus-daemon-config\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930836 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-etc-kubernetes\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930946 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-host-run-netns\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.930998 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-os-release\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931042 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-systemd\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931055 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0cf99dcb-47cd-4077-9fb1-e39bf209e431-os-release\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931074 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-bin\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 
00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931099 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvb5c\" (UniqueName: \"kubernetes.io/projected/64ebdc45-679c-4414-84fa-805ed5d07898-kube-api-access-vvb5c\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931137 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-cnibin\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931166 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931203 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7be2f013-d656-48d9-b332-e66e20efa66f-cnibin\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931230 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovnkube-config\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931354 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-cni-binary-copy\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931467 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hckvg\" (UniqueName: \"kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931478 5119 reconciler_common.go:299] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931488 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931496 5119 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config\") on node \"crc\" DevicePath 
\"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931506 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-d7cps\" (UniqueName: \"kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931514 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931524 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-tkdh6\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931535 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-l9stx\" (UniqueName: \"kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931543 5119 reconciler_common.go:299] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931552 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6g4lr\" (UniqueName: \"kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931560 5119 reconciler_common.go:299] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931568 5119 reconciler_common.go:299] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931577 5119 reconciler_common.go:299] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931585 5119 reconciler_common.go:299] "Volume detached for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931594 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931606 5119 reconciler_common.go:299] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931617 5119 reconciler_common.go:299] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle\") on node \"crc\" DevicePath \"\"" 
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931628 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931638 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pddnv\" (UniqueName: \"kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931646 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931654 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931663 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xxfcv\" (UniqueName: \"kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931672 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pgx6b\" (UniqueName: \"kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931680 5119 reconciler_common.go:299] "Volume detached for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931689 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-grwfz\" (UniqueName: \"kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931701 5119 reconciler_common.go:299] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931712 5119 reconciler_common.go:299] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931725 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931744 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931752 5119 reconciler_common.go:299] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931760 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931770 5119 reconciler_common.go:299] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931779 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-mfzkj\" (UniqueName: \"kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931790 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.931802 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932282 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4g8ts\" (UniqueName: \"kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932292 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-nmmzf\" (UniqueName: \"kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932301 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hm9x7\" (UniqueName: \"kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932310 5119 reconciler_common.go:299] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932318 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932326 5119 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932334 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-wj4qr\" (UniqueName: \"kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932343 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932351 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932359 5119 reconciler_common.go:299] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932368 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ddlk9\" (UniqueName: \"kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932376 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-mjwtd\" (UniqueName: \"kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932452 5119 reconciler_common.go:299] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932463 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932472 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932482 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932490 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932499 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ftwb6\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932509 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932521 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932532 5119 reconciler_common.go:299] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932543 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-l87hs\" (UniqueName: \"kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932554 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932566 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xnxbn\" (UniqueName: \"kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932579 5119 reconciler_common.go:299] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932590 5119 reconciler_common.go:299] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932599 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932610 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4hb7m\" (UniqueName: \"kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932620 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-twvbl\" (UniqueName: \"kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932616 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7be2f013-d656-48d9-b332-e66e20efa66f-cni-binary-copy\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n"
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932632 5119 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932642 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932652 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-qqbfk\" (UniqueName: \"kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932661 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932669 5119 reconciler_common.go:299] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932678 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932686 5119 reconciler_common.go:299] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932694 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932703 5119 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932711 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-99zj9\" (UniqueName: \"kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932719 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932727 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932736 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7jjkz\" (UniqueName: \"kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932745 5119 reconciler_common.go:299] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932753 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932761 5119 reconciler_common.go:299] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932769 5119 reconciler_common.go:299] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932777 5119 reconciler_common.go:299] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932787 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xfp5s\" (UniqueName: \"kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932795 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932806 5119 reconciler_common.go:299] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932814 5119 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932822 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932830 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932838 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-dztfv\" (UniqueName: \"kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932846 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6rmnv\" (UniqueName: \"kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932854 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932862 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932870 5119 reconciler_common.go:299] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932878 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9vsz9\" (UniqueName: \"kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932886 5119 reconciler_common.go:299] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932894 5119 reconciler_common.go:299] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932902 5119 reconciler_common.go:299] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932913 5119 reconciler_common.go:299] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932925 5119 reconciler_common.go:299] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932938 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8nspp\" (UniqueName: \"kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932949 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932958 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932969 5119 reconciler_common.go:299] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932982 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.932993 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933005 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933015 5119 reconciler_common.go:299] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933025 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-26xrl\" (UniqueName: \"kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933034 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933042 5119 reconciler_common.go:299] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933050 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933057 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933065 5119 reconciler_common.go:299] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933073 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933081 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933089 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6dmhf\" (UniqueName: \"kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933097 5119 reconciler_common.go:299] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933105 5119 reconciler_common.go:299] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933114 5119 reconciler_common.go:299] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933123 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933131 5119 reconciler_common.go:299] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933139 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-z5rsr\" (UniqueName: \"kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933147 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933155 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933163 5119 reconciler_common.go:299] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933171 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-5lcfw\" (UniqueName: \"kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933179 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zth6t\" (UniqueName: \"kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933187 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zg8nc\" (UniqueName: \"kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933196 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-w94wk\" (UniqueName: \"kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933206 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pllx6\" (UniqueName: \"kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933213 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zsb9b\" (UniqueName: \"kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933222 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933230 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933237 5119 reconciler_common.go:299] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933245 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-tknt7\" (UniqueName: \"kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933255 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933262 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933270 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933280 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-94l9h\" (UniqueName: \"kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933289 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933297 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-sbc2l\" (UniqueName: \"kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933306 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933314 5119 reconciler_common.go:299] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933322 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933330 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933338 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-m5lgh\" (UniqueName: 
\"kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933369 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933379 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933401 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933411 5119 reconciler_common.go:299] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933421 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933429 5119 reconciler_common.go:299] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933437 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933445 5119 reconciler_common.go:299] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933454 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933462 5119 reconciler_common.go:299] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933471 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933479 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ptkcf\" (UniqueName: \"kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933488 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933495 5119 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933504 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-q4smf\" (UniqueName: \"kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933512 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933520 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933528 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ws8zz\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933537 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-m26jq\" (UniqueName: \"kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933545 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933553 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933562 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8pskd\" (UniqueName: \"kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933570 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933578 5119 reconciler_common.go:299] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933586 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933594 5119 reconciler_common.go:299] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933602 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-rzt4w\" (UniqueName: \"kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933610 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933621 5119 reconciler_common.go:299] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933629 5119 reconciler_common.go:299] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933637 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933644 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933652 5119 reconciler_common.go:299] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933693 5119 reconciler_common.go:299] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933702 5119 reconciler_common.go:299] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933709 5119 reconciler_common.go:299] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.933718 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-qgrkj\" (UniqueName: \"kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.937270 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31fa8943-81cc-4750-a0b7-0fa9ab5af883" (UID: "31fa8943-81cc-4750-a0b7-0fa9ab5af883"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.939708 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.940539 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.941259 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.942944 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.943041 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.943802 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00289c64-f714-4516-af94-db01b82df194\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{
\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T00:11:06Z\\\",\\\"message\\\":\\\"172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nW0130 00:11:05.502958 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 00:11:05.503112 1 builder.go:304] check-endpoints version v0.0.0-unknown-c3d9642-c3d9642\\\\nI0130 00:11:05.504002 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1044629914/tls.crt::/tmp/serving-cert-1044629914/tls.key\\\\\\\"\\\\nI0130 00:11:06.510687 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 00:11:06.512127 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 00:11:06.512141 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 00:11:06.512164 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 00:11:06.512172 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 00:11:06.515444 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0130 00:11:06.515452 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 00:11:06.515483 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:11:06.515488 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:11:06.515492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 00:11:06.515495 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 00:11:06.515498 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 00:11:06.515500 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 00:11:06.517131 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T00:11:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944088 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config" (OuterVolumeSpecName: "config") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944518 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944730 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg" (OuterVolumeSpecName: "kube-api-access-wbmqg") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "kube-api-access-wbmqg". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944756 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq" (OuterVolumeSpecName: "kube-api-access-d4tqq") pod "6ee8fbd3-1f81-4666-96da-5afc70819f1a" (UID: "6ee8fbd3-1f81-4666-96da-5afc70819f1a"). InnerVolumeSpecName "kube-api-access-d4tqq". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944830 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities" (OuterVolumeSpecName: "utilities") pod "b605f283-6f2e-42da-a838-54421690f7d0" (UID: "b605f283-6f2e-42da-a838-54421690f7d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944847 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.944980 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.945777 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.945905 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946279 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946299 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946430 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities" (OuterVolumeSpecName: "utilities") pod "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" (UID: "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946572 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp" (OuterVolumeSpecName: "tmp") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946687 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config" (OuterVolumeSpecName: "config") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946909 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw" (OuterVolumeSpecName: "kube-api-access-9z4sw") pod "e1d2a42d-af1d-4054-9618-ab545e0ed8b7" (UID: "e1d2a42d-af1d-4054-9618-ab545e0ed8b7"). InnerVolumeSpecName "kube-api-access-9z4sw". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.946927 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947005 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947048 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "ce090a97-9ab6-4c40-a719-64ff2acd9778" (UID: "ce090a97-9ab6-4c40-a719-64ff2acd9778"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947262 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "b4750666-1362-4001-abd0-6f89964cc621" (UID: "b4750666-1362-4001-abd0-6f89964cc621"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947282 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947378 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947422 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). 
InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947435 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947830 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2" (OuterVolumeSpecName: "kube-api-access-ks6v2") pod "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" (UID: "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a"). InnerVolumeSpecName "kube-api-access-ks6v2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.947900 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config" (OuterVolumeSpecName: "config") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.949192 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rgpd\" (UniqueName: \"kubernetes.io/projected/0cf99dcb-47cd-4077-9fb1-e39bf209e431-kube-api-access-7rgpd\") pod \"multus-qxpww\" (UID: \"0cf99dcb-47cd-4077-9fb1-e39bf209e431\") " pod="openshift-multus/multus-qxpww" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.949478 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbv85\" (UniqueName: \"kubernetes.io/projected/f00bde3a-9397-4146-a9c4-22c9093d1608-kube-api-access-nbv85\") pod \"node-ca-g8ccx\" (UID: \"f00bde3a-9397-4146-a9c4-22c9093d1608\") " pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.950543 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "149b3c48-e17c-4a66-a835-d86dabf6ff13" (UID: "149b3c48-e17c-4a66-a835-d86dabf6ff13"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.951728 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.954560 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c" (OuterVolumeSpecName: "kube-api-access-8nb9c") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "kube-api-access-8nb9c". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.955157 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f46b0126-7822-4db5-9ef8-f7cc9793bda3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://a111fb94f1a5c8692743f3b12a7215a12315afa40d7e89530817ba3c83892220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://25355ecadd6dcd398c71e3a7073c9f69211f36b2afac9f2f6984d6df7ac981fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://76c06870c12f5d5668f1c7b7bc8ce2a5614
be4f6683f4be3deec72bd8c765802\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.955317 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.955532 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.955806 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config" (OuterVolumeSpecName: "config") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.956145 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9jb2\" (UniqueName: \"kubernetes.io/projected/7be2f013-d656-48d9-b332-e66e20efa66f-kube-api-access-d9jb2\") pod \"multus-additional-cni-plugins-w5d5n\" (UID: \"7be2f013-d656-48d9-b332-e66e20efa66f\") " pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.956213 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: W0130 00:11:23.963276 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34177974_8d82_49d2_a763_391d0df3bbd8.slice/crio-1d5db4a777b51284ac22a85e5a2d13cf34a2e74b6ff5a63f161a43b0ed3f7fea WatchSource:0}: Error finding container 1d5db4a777b51284ac22a85e5a2d13cf34a2e74b6ff5a63f161a43b0ed3f7fea: Status 404 returned error can't find the container with id 1d5db4a777b51284ac22a85e5a2d13cf34a2e74b6ff5a63f161a43b0ed3f7fea Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.964610 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" (UID: "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: W0130 00:11:23.965509 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc4541ce_7789_4670_bc75_5c2868e52ce0.slice/crio-d75a4a2cb143493aed4e7722f70eead5282a2aed2a3a390389f462d9a3996f32 WatchSource:0}: Error finding container d75a4a2cb143493aed4e7722f70eead5282a2aed2a3a390389f462d9a3996f32: Status 404 returned error can't find the container with id d75a4a2cb143493aed4e7722f70eead5282a2aed2a3a390389f462d9a3996f32 Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.971616 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.979086 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.979131 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.979418 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.979442 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.979455 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.980694 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.980807 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.986489 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94a6e063-3d1a-4d44-875d-185291448c31" (UID: "94a6e063-3d1a-4d44-875d-185291448c31"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.986607 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:23 crc kubenswrapper[5119]: I0130 00:11:23.990350 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff39619-cf4b-4c00-8d99-71c924fcf4c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hf5dd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.000448 5119 status_manager.go:919] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa6890cd-00f8-4ffe-b5bb-bba28a4aec8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://26f561423f825ded7c40148a15fe8ae193d72852cc00f4c38d26a15e0459e067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://19610627b078e161daa90c9332e97614
9504a06e21fdad1a326056db66c382e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://24d0493bc380958b22799e8abc55dbf5ba40bd473d68ac50594d66012108fb54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: W0130 00:11:24.002869 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod428b39f5_eb1c_4f65_b7a4_eeb6e84860cc.slice/crio-d99bed4028639f6071dfd445b63cd6cdf655fe3b6033f3df52495f04afa038e2 WatchSource:0}: Error finding container 
d99bed4028639f6071dfd445b63cd6cdf655fe3b6033f3df52495f04afa038e2: Status 404 returned error can't find the container with id d99bed4028639f6071dfd445b63cd6cdf655fe3b6033f3df52495f04afa038e2 Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.009051 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.014635 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-7wgxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477de9be-7588-4409-8970-8585874094e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d2nqp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7wgxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.030000 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g8ccx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f00bde3a-9397-4146-a9c4-22c9093d1608\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nbv85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g8ccx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.036840 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-kubelet\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.036885 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.036911 5119 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7337c888-01aa-4a6b-b494-7a51eff39634-ovn-node-metrics-cert\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.036934 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-env-overrides\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.036956 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-4qznm\" (UniqueName: \"kubernetes.io/projected/976aa95a-addb-4ae4-9ec8-efd0863c66af-kube-api-access-4qznm\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.036979 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5k6t4\" (UniqueName: \"kubernetes.io/projected/dff39619-cf4b-4c00-8d99-71c924fcf4c2-kube-api-access-5k6t4\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037000 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-netns\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037020 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dff39619-cf4b-4c00-8d99-71c924fcf4c2-mcd-auth-proxy-config\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037040 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-r4qqn\" (UniqueName: \"kubernetes.io/projected/7337c888-01aa-4a6b-b494-7a51eff39634-kube-api-access-r4qqn\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037062 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-var-lib-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037082 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-env-overrides\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037103 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-systemd-units\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037122 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-slash\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037141 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-ovn-kubernetes\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037171 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-script-lib\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037194 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-systemd\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037213 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-bin\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037233 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vvb5c\" (UniqueName: \"kubernetes.io/projected/64ebdc45-679c-4414-84fa-805ed5d07898-kube-api-access-vvb5c\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037264 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037285 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovnkube-config\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037309 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-netd\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037331 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037353 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-etc-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037373 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-node-log\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037413 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-config\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037444 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-ovn\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037464 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/dff39619-cf4b-4c00-8d99-71c924fcf4c2-rootfs\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037487 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037508 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dff39619-cf4b-4c00-8d99-71c924fcf4c2-proxy-tls\") pod \"machine-config-daemon-hf5dd\" (UID: 
\"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037541 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-log-socket\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037581 5119 reconciler_common.go:299] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037595 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037607 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037619 5119 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037631 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037643 5119 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037654 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037667 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037678 5119 reconciler_common.go:299] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037689 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037700 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9z4sw\" (UniqueName: \"kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037739 5119 
reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037753 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037766 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ks6v2\" (UniqueName: \"kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037777 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037791 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037805 5119 reconciler_common.go:299] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037815 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037827 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037839 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037850 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037861 5119 reconciler_common.go:299] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037872 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037884 5119 reconciler_common.go:299] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037895 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" 
(UniqueName: \"kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037907 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-d4tqq\" (UniqueName: \"kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037921 5119 reconciler_common.go:299] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037933 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8nb9c\" (UniqueName: \"kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037946 5119 reconciler_common.go:299] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037957 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037968 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037979 5119 reconciler_common.go:299] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.037992 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038005 5119 reconciler_common.go:299] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038017 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038030 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038043 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-wbmqg\" (UniqueName: \"kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038054 5119 reconciler_common.go:299] "Volume detached for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038065 5119 reconciler_common.go:299] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038115 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-log-socket\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038157 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-systemd\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038188 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-bin\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038334 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-node-log\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038380 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/dff39619-cf4b-4c00-8d99-71c924fcf4c2-rootfs\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038384 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038470 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-ovn\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038668 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-script-lib\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038742 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-kubelet\" 
(UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-kubelet\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.038801 5119 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.038857 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs podName:64ebdc45-679c-4414-84fa-805ed5d07898 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.538839934 +0000 UTC m=+88.552902473 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs") pod "network-metrics-daemon-8gjq7" (UID: "64ebdc45-679c-4414-84fa-805ed5d07898") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038967 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovnkube-config\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.038990 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-config\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039007 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-netd\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039112 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-var-lib-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039176 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039178 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-slash\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039207 5119 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-systemd-units\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039224 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-etc-openvswitch\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039252 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-ovn-kubernetes\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039359 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-netns\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039548 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-env-overrides\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039590 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-7wgxz" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039596 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-env-overrides\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.039949 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dff39619-cf4b-4c00-8d99-71c924fcf4c2-mcd-auth-proxy-config\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.042377 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dff39619-cf4b-4c00-8d99-71c924fcf4c2-proxy-tls\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.042491 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.042583 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7337c888-01aa-4a6b-b494-7a51eff39634-ovn-node-metrics-cert\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.049577 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-g8ccx" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.055800 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" Jan 30 00:11:24 crc kubenswrapper[5119]: W0130 00:11:24.055991 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod477de9be_7588_4409_8970_8585874094e8.slice/crio-0fd39bf11ba25ceba4cef7b97d23562d0b3d504c1abb810f831643ba9f6b5db4 WatchSource:0}: Error finding container 0fd39bf11ba25ceba4cef7b97d23562d0b3d504c1abb810f831643ba9f6b5db4: Status 404 returned error can't find the container with id 0fd39bf11ba25ceba4cef7b97d23562d0b3d504c1abb810f831643ba9f6b5db4 Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.063271 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-qxpww" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.079175 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvb5c\" (UniqueName: \"kubernetes.io/projected/64ebdc45-679c-4414-84fa-805ed5d07898-kube-api-access-vvb5c\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.080994 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.081762 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.081774 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.081786 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.081798 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.097971 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4qqn\" (UniqueName: \"kubernetes.io/projected/7337c888-01aa-4a6b-b494-7a51eff39634-kube-api-access-r4qqn\") pod \"ovnkube-node-nwvqg\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.118088 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k6t4\" (UniqueName: \"kubernetes.io/projected/dff39619-cf4b-4c00-8d99-71c924fcf4c2-kube-api-access-5k6t4\") pod \"machine-config-daemon-hf5dd\" (UID: \"dff39619-cf4b-4c00-8d99-71c924fcf4c2\") " pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.139151 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qznm\" (UniqueName: \"kubernetes.io/projected/976aa95a-addb-4ae4-9ec8-efd0863c66af-kube-api-access-4qznm\") pod \"ovnkube-control-plane-57b78d8988-gsh75\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.153098 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7be2f013-d656-48d9-b332-e66e20efa66f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-w5d5n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.183852 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.183887 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.183896 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.183908 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.183917 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.189184 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"976aa95a-addb-4ae4-9ec8-efd0863c66af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qznm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qznm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-57b78d8988-gsh75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc 
kubenswrapper[5119]: I0130 00:11:24.201417 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-7wgxz" event={"ID":"477de9be-7588-4409-8970-8585874094e8","Type":"ContainerStarted","Data":"0fd39bf11ba25ceba4cef7b97d23562d0b3d504c1abb810f831643ba9f6b5db4"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.202470 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" event={"ID":"34177974-8d82-49d2-a763-391d0df3bbd8","Type":"ContainerStarted","Data":"1d5db4a777b51284ac22a85e5a2d13cf34a2e74b6ff5a63f161a43b0ed3f7fea"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.203673 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" event={"ID":"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc","Type":"ContainerStarted","Data":"d99bed4028639f6071dfd445b63cd6cdf655fe3b6033f3df52495f04afa038e2"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.204792 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" event={"ID":"fc4541ce-7789-4670-bc75-5c2868e52ce0","Type":"ContainerStarted","Data":"d75a4a2cb143493aed4e7722f70eead5282a2aed2a3a390389f462d9a3996f32"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.206250 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qxpww" event={"ID":"0cf99dcb-47cd-4077-9fb1-e39bf209e431","Type":"ContainerStarted","Data":"dfdc0c7ba54c93066a8c26dc022531e79e89a59b4cf3cd93c697fdcaef8aaebf"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.206982 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerStarted","Data":"06185bf6d784658b624b585685238eabd88c63780f0eafdd2779900a14e9a17a"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.207705 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-g8ccx" event={"ID":"f00bde3a-9397-4146-a9c4-22c9093d1608","Type":"ContainerStarted","Data":"c9c8163088d1caa2e2e2123e2f3ea299126c60a8e31a75760066ced14cb0470a"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.231779 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.239422 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.239470 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.239501 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.239566 5119 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.239622 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:25.239609568 +0000 UTC m=+89.253672027 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.239631 5119 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.239736 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:25.23971121 +0000 UTC m=+89.253773739 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.239783 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.239957 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240013 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240028 5119 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240055 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240095 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240109 5119 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240064 5119 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:25.240054319 +0000 UTC m=+89.254116778 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.240224 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:25.240188752 +0000 UTC m=+89.254251251 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.272494 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.272494 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.287694 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.287735 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.287746 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.287759 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.287768 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
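The "Node became not ready" condition above is driven by the runtime's network readiness check: until a CNI plugin writes a configuration file into /etc/kubernetes/cni/net.d/ (here that plugin is the multus/OVN-Kubernetes stack, whose pods are themselves still being created), NetworkReady stays false and the node reports KubeletNotReady. A small sketch of the discovery step, roughly what libcni's conf-file scan does; the directory and the failure message match the log, the rest is illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	var confs []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni considers
			confs = append(confs, e.Name())
		}
	}
	if len(confs) == 0 {
		// This is the state the kubelet is reporting: NetworkReady=false.
		fmt.Println("no CNI configuration file found; network plugin not ready")
		return
	}
	fmt.Println("CNI configs:", confs)
}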
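Every status patch in this log fails the same way: before admitting the patch, the API server must call the mutating webhook pod.network-node-identity.openshift.io at https://127.0.0.1:9743, and nothing is listening there yet, apparently because the network-node-identity backend belongs to the same networking stack that is still coming up. The kubelet keeps the desired status cached and re-patches later, so these entries are transient. A trivial, hypothetical probe for the webhook endpoint (a diagnostic sketch, not something the kubelet runs):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // webhook endpoint from the log
	conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
	if err != nil {
		// Same failure mode as the log: dial tcp 127.0.0.1:9743: connection refused.
		fmt.Println("webhook not reachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint", addr, "is accepting connections")
}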
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.309702 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.352368 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-qxpww" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf99dcb-47cd-4077-9fb1-e39bf209e431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rgpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qxpww\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.370066 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd"
Jan 30 00:11:24 crc kubenswrapper[5119]: W0130 00:11:24.387280 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddff39619_cf4b_4c00_8d99_71c924fcf4c2.slice/crio-7084fcb3c1dad1c5221b1409cfb341b8d9099869da1e67886922aff0b7300dba WatchSource:0}: Error finding container 7084fcb3c1dad1c5221b1409cfb341b8d9099869da1e67886922aff0b7300dba: Status 404 returned error can't find the container with id 7084fcb3c1dad1c5221b1409cfb341b8d9099869da1e67886922aff0b7300dba
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.388595 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.390216 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8gjq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"64ebdc45-679c-4414-84fa-805ed5d07898\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvb5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49b34ce0d25eec7a6077f4bf21bf7d4e64e598d28785a20b9ee3594423b7de14\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvb5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8gjq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.391600 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.391627 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.391638 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.391654 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.391667 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.399122 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.399159 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.399171 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.399185 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.399196 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5119]: W0130 00:11:24.401634 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7337c888_01aa_4a6b_b494_7a51eff39634.slice/crio-f23c029375f63319b7e9208cc19fd212e1bd325e2d7139da03db451166d0a71a WatchSource:0}: Error finding container f23c029375f63319b7e9208cc19fd212e1bd325e2d7139da03db451166d0a71a: Status 404 returned error can't find the container with id f23c029375f63319b7e9208cc19fd212e1bd325e2d7139da03db451166d0a71a
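The "Failed to process watch event" warnings come from cAdvisor: a cgroup shows up under kubepods.slice before (or after) CRI-O can resolve the embedded container ID, so the lookup returns 404; during a restart like this one the warnings are routine churn. The pod UID and container ID are recoverable from the cgroup path itself, as in this illustrative parser (the path is copied from the entry above; the parsing logic is an assumption about the naming scheme, not kubelet code):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Matches "...-pod<uid with underscores>.slice/crio-<container id>".
var re = regexp.MustCompile(`-pod([0-9a-f_]+)\.slice/crio-([0-9a-f]+)$`)

func main() {
	name := "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7337c888_01aa_4a6b_b494_7a51eff39634.slice/crio-f23c029375f63319b7e9208cc19fd212e1bd325e2d7139da03db451166d0a71a"
	m := re.FindStringSubmatch(name)
	if m == nil {
		fmt.Println("not a pod container cgroup")
		return
	}
	podUID := strings.ReplaceAll(m[1], "_", "-") // cgroup names encode '-' as '_'
	fmt.Println("pod UID:      ", podUID)
	fmt.Println("container ID: ", m[2])
}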
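The "Error updating node status, will retry" entries that follow repeat with identical patch bodies because the kubelet attempts the node status update several times back to back per sync before waiting for the next sync period; upstream kubelet uses a small fixed retry count (nodeStatusUpdateRetry, 5 in the versions I know of, treated here as an assumption rather than read from this build). Schematically:

package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the upstream kubelet constant (assumed 5 here).
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus stands in for the PATCH that the webhook is rejecting.
func tryUpdateNodeStatus() error {
	return errors.New(`Post "https://127.0.0.1:9743/node?timeout=10s": dial tcp 127.0.0.1:9743: connect: connection refused`)
}

func main() {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Println("Error updating node status, will retry:", err)
			continue
		}
		return
	}
	fmt.Println("Unable to update node status: giving up until the next sync period")
}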
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.405567 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75"
Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.407515 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.410018 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.410054 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.410066 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.410081 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.410091 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.417744 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.420429 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.420461 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.420475 5119 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.420490 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.420503 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.429433 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.430850 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a38f3f8-2148-44b1-a810-3575768476c1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://85041ef0cd373f79431708df645822f3e2297976dc78bf3d5d6c3f2fd983b55b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c3b5150c83cf2a286e1ea02402aa68603f4b4d47bbd5b7a9b98dd36b6093f984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3b5150c83cf2a286e1ea02402aa68603f4b4d47bbd5b7a9b98dd36b6093f984\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.432266 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.432299 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.432310 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.432324 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.432333 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: W0130 00:11:24.436535 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod976aa95a_addb_4ae4_9ec8_efd0863c66af.slice/crio-63a3aed84087fd31e4f22ce50456603c6699c61f29660009544c63ff9a536a81 WatchSource:0}: Error finding container 63a3aed84087fd31e4f22ce50456603c6699c61f29660009544c63ff9a536a81: Status 404 returned error can't find the container with id 63a3aed84087fd31e4f22ce50456603c6699c61f29660009544c63ff9a536a81 Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.440873 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400460Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861260Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd
602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"si
zeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0450a234-c8a5-4d6e-a553-22c02a94238f\\\",\\\"systemUUID\\\":\\\"fd6e6faa-72a3-42f3-97bf-c98b5bfd4d42\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.441166 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.441406 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:25.441371886 +0000 UTC m=+89.455434345 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.443685 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.443722 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.443734 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.443750 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.443761 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.450900 5119 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status [status patch payload elided: byte-for-byte identical to the 00:11:24.440873 attempt above] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.451066 5119 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.474513 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7337c888-01aa-4a6b-b494-7a51eff39634\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nwvqg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.493516 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.493555 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.493564 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.493578 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.493588 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.542491 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.542623 5119 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.542762 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs podName:64ebdc45-679c-4414-84fa-805ed5d07898 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:25.542744496 +0000 UTC m=+89.556806955 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs") pod "network-metrics-daemon-8gjq7" (UID: "64ebdc45-679c-4414-84fa-805ed5d07898") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.596200 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.596246 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.596256 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.596274 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.596283 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.701635 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.701679 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.701691 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.701709 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.701725 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.748482 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.748517 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.748624 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:24 crc kubenswrapper[5119]: E0130 00:11:24.748775 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.763848 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01080b46-74f1-4191-8755-5152a57b3b25" path="/var/lib/kubelet/pods/01080b46-74f1-4191-8755-5152a57b3b25/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.764757 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09cfa50b-4138-4585-a53e-64dd3ab73335" path="/var/lib/kubelet/pods/09cfa50b-4138-4585-a53e-64dd3ab73335/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.803275 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.803324 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.803336 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.803350 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.803360 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.822514 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dd0fbac-8c0d-4228-8faa-abbeedabf7db" path="/var/lib/kubelet/pods/0dd0fbac-8c0d-4228-8faa-abbeedabf7db/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.826559 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0effdbcf-dd7d-404d-9d48-77536d665a5d" path="/var/lib/kubelet/pods/0effdbcf-dd7d-404d-9d48-77536d665a5d/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.834327 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="149b3c48-e17c-4a66-a835-d86dabf6ff13" path="/var/lib/kubelet/pods/149b3c48-e17c-4a66-a835-d86dabf6ff13/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.849979 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16bdd140-dce1-464c-ab47-dd5798d1d256" path="/var/lib/kubelet/pods/16bdd140-dce1-464c-ab47-dd5798d1d256/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.851559 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18f80adb-c1c3-49ba-8ee4-932c851d3897" path="/var/lib/kubelet/pods/18f80adb-c1c3-49ba-8ee4-932c851d3897/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.865821 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" path="/var/lib/kubelet/pods/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.866529 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2325ffef-9d5b-447f-b00e-3efc429acefe" path="/var/lib/kubelet/pods/2325ffef-9d5b-447f-b00e-3efc429acefe/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.873872 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="301e1965-1754-483d-b6cc-bfae7038bbca" path="/var/lib/kubelet/pods/301e1965-1754-483d-b6cc-bfae7038bbca/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.892702 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31fa8943-81cc-4750-a0b7-0fa9ab5af883" path="/var/lib/kubelet/pods/31fa8943-81cc-4750-a0b7-0fa9ab5af883/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.894859 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42a11a02-47e1-488f-b270-2679d3298b0e" path="/var/lib/kubelet/pods/42a11a02-47e1-488f-b270-2679d3298b0e/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.896250 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="567683bd-0efc-4f21-b076-e28559628404" path="/var/lib/kubelet/pods/567683bd-0efc-4f21-b076-e28559628404/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.901805 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="584e1f4a-8205-47d7-8efb-3afc6017c4c9" path="/var/lib/kubelet/pods/584e1f4a-8205-47d7-8efb-3afc6017c4c9/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.902295 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="593a3561-7760-45c5-8f91-5aaef7475d0f" path="/var/lib/kubelet/pods/593a3561-7760-45c5-8f91-5aaef7475d0f/volumes"
Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.904521 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ebfebf6-3ecd-458e-943f-bb25b52e2718" path="/var/lib/kubelet/pods/5ebfebf6-3ecd-458e-943f-bb25b52e2718/volumes"
Jan 30 00:11:24 
crc kubenswrapper[5119]: I0130 00:11:24.905033 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.905073 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.905086 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.905103 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.905114 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.905458 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6077b63e-53a2-4f96-9d56-1ce0324e4913" path="/var/lib/kubelet/pods/6077b63e-53a2-4f96-9d56-1ce0324e4913/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.907584 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" path="/var/lib/kubelet/pods/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.909466 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6edfcf45-925b-4eff-b940-95b6fc0b85d4" path="/var/lib/kubelet/pods/6edfcf45-925b-4eff-b940-95b6fc0b85d4/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.916416 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ee8fbd3-1f81-4666-96da-5afc70819f1a" path="/var/lib/kubelet/pods/6ee8fbd3-1f81-4666-96da-5afc70819f1a/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.930506 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" path="/var/lib/kubelet/pods/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.993778 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="736c54fe-349c-4bb9-870a-d1c1d1c03831" path="/var/lib/kubelet/pods/736c54fe-349c-4bb9-870a-d1c1d1c03831/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.996178 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7599e0b6-bddf-4def-b7f2-0b32206e8651" path="/var/lib/kubelet/pods/7599e0b6-bddf-4def-b7f2-0b32206e8651/volumes" Jan 30 00:11:24 crc kubenswrapper[5119]: I0130 00:11:24.998444 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7afa918d-be67-40a6-803c-d3b0ae99d815" path="/var/lib/kubelet/pods/7afa918d-be67-40a6-803c-d3b0ae99d815/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.000229 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7df94c10-441d-4386-93a6-6730fb7bcde0" path="/var/lib/kubelet/pods/7df94c10-441d-4386-93a6-6730fb7bcde0/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.001356 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" path="/var/lib/kubelet/pods/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.007442 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.007479 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.007491 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.007507 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.007518 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.009741 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81e39f7b-62e4-4fc9-992a-6535ce127a02" path="/var/lib/kubelet/pods/81e39f7b-62e4-4fc9-992a-6535ce127a02/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.010583 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="869851b9-7ffb-4af0-b166-1d8aa40a5f80" path="/var/lib/kubelet/pods/869851b9-7ffb-4af0-b166-1d8aa40a5f80/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.012823 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" path="/var/lib/kubelet/pods/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.013761 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92dfbade-90b6-4169-8c07-72cff7f2c82b" path="/var/lib/kubelet/pods/92dfbade-90b6-4169-8c07-72cff7f2c82b/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.017862 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94a6e063-3d1a-4d44-875d-185291448c31" path="/var/lib/kubelet/pods/94a6e063-3d1a-4d44-875d-185291448c31/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.019078 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f71a554-e414-4bc3-96d2-674060397afe" path="/var/lib/kubelet/pods/9f71a554-e414-4bc3-96d2-674060397afe/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.028575 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a208c9c2-333b-4b4a-be0d-bc32ec38a821" path="/var/lib/kubelet/pods/a208c9c2-333b-4b4a-be0d-bc32ec38a821/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.032720 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a52afe44-fb37-46ed-a1f8-bf39727a3cbe" path="/var/lib/kubelet/pods/a52afe44-fb37-46ed-a1f8-bf39727a3cbe/volumes" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.033708 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a555ff2e-0be6-46d5-897d-863bb92ae2b3" path="/var/lib/kubelet/pods/a555ff2e-0be6-46d5-897d-863bb92ae2b3/volumes" 
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.035169 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7a88189-c967-4640-879e-27665747f20c" path="/var/lib/kubelet/pods/a7a88189-c967-4640-879e-27665747f20c/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.036500 5119 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="af33e427-6803-48c2-a76a-dd9deb7cbf9a" path="/var/lib/kubelet/pods/af33e427-6803-48c2-a76a-dd9deb7cbf9a/volume-subpaths/run-systemd/ovnkube-controller/6"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.036613 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af33e427-6803-48c2-a76a-dd9deb7cbf9a" path="/var/lib/kubelet/pods/af33e427-6803-48c2-a76a-dd9deb7cbf9a/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.046744 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af41de71-79cf-4590-bbe9-9e8b848862cb" path="/var/lib/kubelet/pods/af41de71-79cf-4590-bbe9-9e8b848862cb/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.050136 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" path="/var/lib/kubelet/pods/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.056267 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4750666-1362-4001-abd0-6f89964cc621" path="/var/lib/kubelet/pods/b4750666-1362-4001-abd0-6f89964cc621/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.058793 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b605f283-6f2e-42da-a838-54421690f7d0" path="/var/lib/kubelet/pods/b605f283-6f2e-42da-a838-54421690f7d0/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.059309 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c491984c-7d4b-44aa-8c1e-d7974424fa47" path="/var/lib/kubelet/pods/c491984c-7d4b-44aa-8c1e-d7974424fa47/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.064200 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5f2bfad-70f6-4185-a3d9-81ce12720767" path="/var/lib/kubelet/pods/c5f2bfad-70f6-4185-a3d9-81ce12720767/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.065220 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc85e424-18b2-4924-920b-bd291a8c4b01" path="/var/lib/kubelet/pods/cc85e424-18b2-4924-920b-bd291a8c4b01/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.066209 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce090a97-9ab6-4c40-a719-64ff2acd9778" path="/var/lib/kubelet/pods/ce090a97-9ab6-4c40-a719-64ff2acd9778/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.072955 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d19cb085-0c5b-4810-b654-ce7923221d90" path="/var/lib/kubelet/pods/d19cb085-0c5b-4810-b654-ce7923221d90/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.077003 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" path="/var/lib/kubelet/pods/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.080771 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d565531a-ff86-4608-9d19-767de01ac31b" path="/var/lib/kubelet/pods/d565531a-ff86-4608-9d19-767de01ac31b/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.082633 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7e8f42f-dc0e-424b-bb56-5ec849834888" path="/var/lib/kubelet/pods/d7e8f42f-dc0e-424b-bb56-5ec849834888/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.084577 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" path="/var/lib/kubelet/pods/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.090726 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e093be35-bb62-4843-b2e8-094545761610" path="/var/lib/kubelet/pods/e093be35-bb62-4843-b2e8-094545761610/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.091648 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" path="/var/lib/kubelet/pods/e1d2a42d-af1d-4054-9618-ab545e0ed8b7/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.093171 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f559dfa3-3917-43a2-97f6-61ddfda10e93" path="/var/lib/kubelet/pods/f559dfa3-3917-43a2-97f6-61ddfda10e93/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.099626 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f65c0ac1-8bca-454d-a2e6-e35cb418beac" path="/var/lib/kubelet/pods/f65c0ac1-8bca-454d-a2e6-e35cb418beac/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.100574 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" path="/var/lib/kubelet/pods/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.101943 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7e2c886-118e-43bb-bef1-c78134de392b" path="/var/lib/kubelet/pods/f7e2c886-118e-43bb-bef1-c78134de392b/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.104082 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" path="/var/lib/kubelet/pods/fc8db2c7-859d-47b3-a900-2bd0c0b2973b/volumes"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.110313 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.110343 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.110351 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.110362 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.110372 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.211759 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.211793 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.211803 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.211816 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.211826 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.213012 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"4ec9f325eee2102e27ce2c2c8fd3570bc6b933200f4125272f5d5dc6a4741502"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.213060 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"7084fcb3c1dad1c5221b1409cfb341b8d9099869da1e67886922aff0b7300dba"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.214610 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb" exitCode=0
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.214700 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.214731 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"f23c029375f63319b7e9208cc19fd212e1bd325e2d7139da03db451166d0a71a"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.217644 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qxpww" event={"ID":"0cf99dcb-47cd-4077-9fb1-e39bf209e431","Type":"ContainerStarted","Data":"0c105e9976984cb6d41e14b3c489d698232bb451fdc0dbc82cb48a1e367f105b"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.220485 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-7wgxz" event={"ID":"477de9be-7588-4409-8970-8585874094e8","Type":"ContainerStarted","Data":"7d8da4244e02d481bf4af70ce2bdea9dafbfda2d1e9c4e11227fc01981f74e70"}
Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.221990 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod"
pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" event={"ID":"34177974-8d82-49d2-a763-391d0df3bbd8","Type":"ContainerStarted","Data":"e5cccee6fa85acafe4760b5b5d2908ffa36c3b6229815a48fc0dbfaf705aa28a"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.225016 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" event={"ID":"fc4541ce-7789-4670-bc75-5c2868e52ce0","Type":"ContainerStarted","Data":"2069af15d8bc703844201cec66d39e20c6a9670339596d75364269009c734233"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.225176 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" event={"ID":"fc4541ce-7789-4670-bc75-5c2868e52ce0","Type":"ContainerStarted","Data":"5a2b9bf7d3b0a9b5e7ba3f25b213fb73f33acd44e81c8c24733bc704957c523b"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.226714 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerStarted","Data":"3a6d27cff8f24a951a1c3f524144c6f12cdf864c201adb282ef2ba46fe31c3ff"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.229030 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-g8ccx" event={"ID":"f00bde3a-9397-4146-a9c4-22c9093d1608","Type":"ContainerStarted","Data":"e15a486bcad6e08fc925a4fcde489df89093b312ec1f1c0bea15b39149222d23"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.230808 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" event={"ID":"976aa95a-addb-4ae4-9ec8-efd0863c66af","Type":"ContainerStarted","Data":"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.230832 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" event={"ID":"976aa95a-addb-4ae4-9ec8-efd0863c66af","Type":"ContainerStarted","Data":"63a3aed84087fd31e4f22ce50456603c6699c61f29660009544c63ff9a536a81"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.232857 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7337c888-01aa-4a6b-b494-7a51eff39634\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overr
ides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"n
ame\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"resources\\\":{},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:11:24Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4qqn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nwvqg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.249905 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.249964 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.250096 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.250136 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.251625 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.251647 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.251657 5119 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.251702 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:27.251688835 +0000 UTC m=+91.265751294 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.252357 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.252375 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.252382 5119 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.252433 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:27.252421352 +0000 UTC m=+91.266483811 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.252991 5119 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.253045 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:27.253033727 +0000 UTC m=+91.267096246 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.253258 5119 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.253349 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:27.253327704 +0000 UTC m=+91.267390213 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.262275 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cff142ed-dc20-4c3a-b157-dbc3d3cdd9af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"},\\\"containerID\\\":\\\"cri-o://e3beffa08faf81da0394c450397815931c72fd49f42fd6218edbaac85fe6528f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://67b9fad7cb88a39c44754f831ea6adeeea20e24f7bf2131cfd0d76dde042924c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"20
26-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b9aba26083b8cd47e9a89c003b6fec66d485c32c4e80234a1f3e95d56d86e185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1e870376ee035d24f0f3af0d51bbb91b9860fd137c3f88364c555717aca89a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://1abdada27c369710bf3ba52e3f0e584cecec6ad6f6f11b5757c0d0748d7ae54c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/m
anifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c43657b4b20e341dcfedff314e3d3465e7cfe7bbf41accc0e0e85f10f9c7b5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c43657b4b20e341dcfedff314e3d3465e7cfe7bbf41accc0e0e85f10f9c7b5a4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd-auto-backup\\\",\\\"name\\\":\\\"etcd-auto-backup-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc1ae539
75abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:10:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:10:00Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.276881 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00289c64-f714-4516-af94-db01b82df194\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\
\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T00:11:06Z\\\",\\\"message\\\":\\\"172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nW0130 00:11:05.502958 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 00:11:05.503112 1 builder.go:304] check-endpoints version v0.0.0-unknown-c3d9642-c3d9642\\\\nI0130 00:11:05.504002 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1044629914/tls.crt::/tmp/serving-cert-1044629914/tls.key\\\\\\\"\\\\nI0130 00:11:06.510687 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 00:11:06.512127 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 00:11:06.512141 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 00:11:06.512164 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 00:11:06.512172 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 00:11:06.515444 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0130 00:11:06.515452 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 00:11:06.515483 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:11:06.515488 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:11:06.515492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 00:11:06.515495 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 00:11:06.515498 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 00:11:06.515500 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 00:11:06.517131 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T00:11:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 
40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.286742 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f46b0126-7822-4db5-9ef8-f7cc9793bda3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://a111fb94f1a5c8692743f3b12a7215a12315afa40d7e89530817ba3c83892220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://25355ecadd6dcd398c71e3a7073c9f69211f36b2afac9f2f6984d6df7ac981fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://76c06870c12f5d5668f1c7b7bc8ce2a5614be4f6683f4be3deec72bd8c765802\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf
1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.296843 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.306983 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.313460 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.313497 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.313510 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.313529 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.313542 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.314341 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff39619-cf4b-4c00-8d99-71c924fcf4c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hf5dd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.324245 5119 status_manager.go:919] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa6890cd-00f8-4ffe-b5bb-bba28a4aec8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://26f561423f825ded7c40148a15fe8ae193d72852cc00f4c38d26a15e0459e067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://19610627b078e161daa90c9332e976149504a06e21fdad1a326056db66c382e7\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://24d0493bc380958b22799e8abc55dbf5ba40bd473d68ac50594d66012108fb54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.334365 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.341839 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-7wgxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477de9be-7588-4409-8970-8585874094e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d2nqp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7wgxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.348463 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g8ccx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f00bde3a-9397-4146-a9c4-22c9093d1608\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nbv85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g8ccx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.360415 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7be2f013-d656-48d9-b332-e66e20efa66f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6
eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-w5d5n\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.368010 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"976aa95a-addb-4ae4-9ec8-efd0863c66af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qznm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qznm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-57b78d8988-gsh75\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.376771 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.394909 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.405248 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.415143 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.415177 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.415198 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.415218 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.415231 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.415980 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-qxpww" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf99dcb-47cd-4077-9fb1-e39bf209e431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rgpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qxpww\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.423679 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-8gjq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"64ebdc45-679c-4414-84fa-805ed5d07898\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvb5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49b34ce0d25eec7a6077f4bf21bf7d4e64e598d28785a20b9ee3594423b7de14\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvb5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-8gjq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc 
kubenswrapper[5119]: I0130 00:11:25.431644 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a38f3f8-2148-44b1-a810-3575768476c1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://85041ef0cd373f79431708df645822f3e2297976dc78bf3d5d6c3f2fd983b55b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c3b5150c83cf2a286e1ea02402aa68603f4b4d47bbd5b7a9b98dd36b6093f984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3b5150c83cf2a286e1ea02402aa68603f4b4d47bbd5b7a9b98dd36b6093f984\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Ru
nning\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.451614 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.451777 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:27.451761802 +0000 UTC m=+91.465824261 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.455103 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cff142ed-dc20-4c3a-b157-dbc3d3cdd9af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"},\\\"containerID\\\":\\\"cri-o://e3beffa08faf81da0394c450397815931c72fd49f42fd6218edbaac85fe6528f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://67b9fad7cb88a39c44754f831ea6adeeea20e24f7bf2131cfd0d76dde042924c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b9aba26083b8cd47e9a89c003b6fec66d485c32c4e80234a1f3e95d56d86e185\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1e870376ee035d24f0f3af0d51bbb91b9860fd137c3f88364c555717aca89a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://1abdada27c369710bf3ba52e3f0e584cecec6ad6f6f11b5757c0d0748d7ae54c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:10:01Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c43657b4b20e341dcfedff314e3d3465e7cfe7b
bf41accc0e0e85f10f9c7b5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c43657b4b20e341dcfedff314e3d3465e7cfe7bbf41accc0e0e85f10f9c7b5a4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd-auto-backup\\\",\\\"name\\\":\\\"etcd-auto-backup-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c3d092f2660f2aa973d143779dbceb7b50b612d7c321d44e9b2f15927a434cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc1ae53975abf21f2965d1f74a1167fbfee499d97830ad56708df3c81cf0083a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:10:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:10:00Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.467322 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00289c64-f714-4516-af94-db01b82df194\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf
9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T00:11:06Z\\\",\\\"message\\\":\\\"172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nW0130 00:11:05.502958 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 00:11:05.503112 1 builder.go:304] check-endpoints version v0.0.0-unknown-c3d9642-c3d9642\\\\nI0130 00:11:05.504002 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1044629914/tls.crt::/tmp/serving-cert-1044629914/tls.key\\\\\\\"\\\\nI0130 00:11:06.510687 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 00:11:06.512127 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 00:11:06.512141 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 00:11:06.512164 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 00:11:06.512172 
1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 00:11:06.515444 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0130 00:11:06.515452 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 00:11:06.515483 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:11:06.515488 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:11:06.515492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 00:11:06.515495 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 00:11:06.515498 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 00:11:06.515500 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 00:11:06.517131 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T00:11:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\
"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.476631 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f46b0126-7822-4db5-9ef8-f7cc9793bda3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://a111fb94f1a5c8692743f3b12a7215a12315afa40d7e89530817ba3c83892220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://25355ecadd6dcd398c71e3a7073c9f69211f36b2afac9f2f6984d6df7ac981fa\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://76c06870c12f5d5668f1c7b7bc8ce2a5614be4f6683f4be3deec72bd8c765802\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9806c9a05c8e04261d45e139bdd205f5599c68f41d7290cdd95fb5dc65be4755\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.486143 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://e5cccee6fa85acafe4760b5b5d2908ffa36c3b6229815a48fc0dbfaf705aa28a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:24Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.494570 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.504781 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff39619-cf4b-4c00-8d99-71c924fcf4c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k6t4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hf5dd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.516582 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa6890cd-00f8-4ffe-b5bb-bba28a4aec8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://26f561423f825ded7c40148a15fe8ae193d72852cc00f4c38d26a15e0459e067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://19610627b078e161daa90c9332e976149504a06e21fdad1a326056db66c382e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha25
6:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:58Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://24d0493bc380958b22799e8abc55dbf5ba40bd473d68ac50594d66012108fb54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:59Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.517105 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.517146 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.517156 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.517169 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.517178 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.551873 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://2069af15d8bc703844201cec66d39e20c6a9670339596d75364269009c734233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:24Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0,1000500000],\\\"uid\\\":1000500000}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5a2b9bf7d3b0a9b5e7ba3f25b213fb73f33acd44e81c8c24733bc704957c523b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-30T00:11:24Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0,1000500000],\\\"uid\\\":1000500000}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.552258 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.552406 5119 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.552467 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs podName:64ebdc45-679c-4414-84fa-805ed5d07898 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:27.552453045 +0000 UTC m=+91.566515504 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs") pod "network-metrics-daemon-8gjq7" (UID: "64ebdc45-679c-4414-84fa-805ed5d07898") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.589853 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-7wgxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477de9be-7588-4409-8970-8585874094e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"21Mi\\\"},\\\"containerID\\\":\\\"cri-o://7d8da4244e02d481bf4af70ce2bdea9dafbfda2d1e9c4e11227fc01981f74e70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"21Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:24Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d2nqp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7wgxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.619878 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.619922 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc 
kubenswrapper[5119]: I0130 00:11:25.619934 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.619948 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.619959 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.629284 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g8ccx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f00bde3a-9397-4146-a9c4-22c9093d1608\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nbv85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g8ccx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.673211 5119 status_manager.go:919] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-w5d5n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7be2f013-d656-48d9-b332-e66e20efa66f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6d27cff8f24a951a1c3f524144c6f12cdf864c201adb282ef2ba46fe31c3ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"resources\\\":{},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:24Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f73
1ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d9jb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-w5d5n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.712558 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"976aa95a-addb-4ae4-9ec8-efd0863c66af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qznm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qznm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-57b78d8988-gsh75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.721744 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.721790 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.721800 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.721814 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.721823 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.748491 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.748660 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.748740 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:25 crc kubenswrapper[5119]: E0130 00:11:25.748922 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.751530 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.823796 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.823845 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.823854 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.823869 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.823878 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.915126 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-qxpww" podStartSLOduration=69.915103426 podStartE2EDuration="1m9.915103426s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:25.873253489 +0000 UTC m=+89.887315948" watchObservedRunningTime="2026-01-30 00:11:25.915103426 +0000 UTC m=+89.929165885" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.925350 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.925420 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.925435 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.925455 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:25 crc kubenswrapper[5119]: I0130 00:11:25.925467 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.005181 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=3.005161855 podStartE2EDuration="3.005161855s" podCreationTimestamp="2026-01-30 00:11:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:25.956971144 +0000 UTC m=+89.971033603" watchObservedRunningTime="2026-01-30 00:11:26.005161855 +0000 UTC m=+90.019224314" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.027541 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.027589 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.027599 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.027618 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.027627 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.129227 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.129292 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.129302 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.129317 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.129326 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.231360 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.231414 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.231425 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.231454 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.231467 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.238474 5119 generic.go:358] "Generic (PLEG): container finished" podID="7be2f013-d656-48d9-b332-e66e20efa66f" containerID="3a6d27cff8f24a951a1c3f524144c6f12cdf864c201adb282ef2ba46fe31c3ff" exitCode=0 Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.238553 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerDied","Data":"3a6d27cff8f24a951a1c3f524144c6f12cdf864c201adb282ef2ba46fe31c3ff"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.240130 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" event={"ID":"976aa95a-addb-4ae4-9ec8-efd0863c66af","Type":"ContainerStarted","Data":"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.241661 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"c6fc486250362b90f4f4bfb8f67c74342dffa49ce1377fd6f45d77a2b89a3d70"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.246345 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.246417 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.246427 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.246441 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.246450 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.254793 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=3.254776834 podStartE2EDuration="3.254776834s" podCreationTimestamp="2026-01-30 00:11:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.254705853 +0000 UTC m=+90.268768332" watchObservedRunningTime="2026-01-30 00:11:26.254776834 +0000 UTC m=+90.268839283" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.327141 5119 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=4.327120466 podStartE2EDuration="4.327120466s" podCreationTimestamp="2026-01-30 00:11:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.313442767 +0000 UTC m=+90.327505246" watchObservedRunningTime="2026-01-30 00:11:26.327120466 +0000 UTC m=+90.341182925" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.333799 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.333839 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.333850 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.333865 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.333877 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.338434 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-7wgxz" podStartSLOduration=71.338420328 podStartE2EDuration="1m11.338420328s" podCreationTimestamp="2026-01-30 00:10:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.337792023 +0000 UTC m=+90.351854482" watchObservedRunningTime="2026-01-30 00:11:26.338420328 +0000 UTC m=+90.352482797" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.436182 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.436224 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.436234 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.436250 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.436270 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.487343 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=3.487328513 podStartE2EDuration="3.487328513s" podCreationTimestamp="2026-01-30 00:11:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.483697826 +0000 UTC m=+90.497760285" watchObservedRunningTime="2026-01-30 00:11:26.487328513 +0000 UTC m=+90.501390972" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.546731 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.546765 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.546776 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.546788 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.546797 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.572841 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podStartSLOduration=70.57276715 podStartE2EDuration="1m10.57276715s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.572134135 +0000 UTC m=+90.586196594" watchObservedRunningTime="2026-01-30 00:11:26.57276715 +0000 UTC m=+90.586829609" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.635727 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-g8ccx" podStartSLOduration=70.635710476 podStartE2EDuration="1m10.635710476s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.593711474 +0000 UTC m=+90.607773933" watchObservedRunningTime="2026-01-30 00:11:26.635710476 +0000 UTC m=+90.649772935" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.648948 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.649006 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.649018 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.649036 5119 kubelet_node_status.go:736] 
"Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.649046 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.750152 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:26 crc kubenswrapper[5119]: E0130 00:11:26.750266 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.750450 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:26 crc kubenswrapper[5119]: E0130 00:11:26.750531 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.751539 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.751566 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.751578 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.751593 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.751623 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.853785 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.853836 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.853853 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.853868 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.853877 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.955913 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.955978 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.955993 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.956011 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5119]: I0130 00:11:26.956022 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.057910 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.057949 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.057960 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.057972 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.057981 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.160680 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.160962 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.160975 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.160992 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.161004 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.251433 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.252417 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" event={"ID":"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc","Type":"ContainerStarted","Data":"0c9e71c43b3103ef7841e7ff47d4bcb0a4023658a2a584b6a0e5c95de019d2ef"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.254707 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerStarted","Data":"113447c156e7410d3c13f84df27644d154ec0bb3b73569df5ee6b2a1409ff01f"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.262899 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.262930 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.262938 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.262950 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.262958 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.264883 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" podStartSLOduration=70.264873303 podStartE2EDuration="1m10.264873303s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:26.636643338 +0000 UTC m=+90.650705797" watchObservedRunningTime="2026-01-30 00:11:27.264873303 +0000 UTC m=+91.278935762" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.272003 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.272070 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.272125 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.272197 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.272283 5119 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.272323 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:31.272312863 +0000 UTC m=+95.286375322 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.272581 5119 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.272612 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:31.27260531 +0000 UTC m=+95.286667769 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273186 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273203 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273212 5119 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273237 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:31.273229995 +0000 UTC m=+95.287292454 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273662 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273678 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273685 5119 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.273707 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:31.273700456 +0000 UTC m=+95.287762915 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.364983 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.365060 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.365073 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.365089 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.365100 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.467318 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.467361 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.467372 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.467403 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.467416 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.474780 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.474977 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:31.474953661 +0000 UTC m=+95.489016120 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.569334 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.569380 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.569417 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.569436 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.569447 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.575743 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.575905 5119 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.575957 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs podName:64ebdc45-679c-4414-84fa-805ed5d07898 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:31.575938923 +0000 UTC m=+95.590001382 (durationBeforeRetry 4s). 
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.671083 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.671117 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.671126 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.671139 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.671148 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.748922 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.748942 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.749064 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898"
Jan 30 00:11:27 crc kubenswrapper[5119]: E0130 00:11:27.749157 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.773225 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.773272 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.773281 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.773294 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.773305 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.875627 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.875676 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.875689 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.875705 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.875718 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"}
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.978198 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.978245 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.978254 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.978268 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:27 crc kubenswrapper[5119]: I0130 00:11:27.978278 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.080717 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.080789 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.080801 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.080820 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.080832 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.209486 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.209548 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.209565 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.209587 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.209605 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.311402 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.311440 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.311449 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.311461 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.311469 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.413376 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.413423 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.413432 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.413444 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.413453 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.515114 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.515148 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.515157 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.515169 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.515178 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.616508 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.616782 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.616793 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.616808 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.616819 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.719454 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.719495 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.719505 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.719520 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.719532 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.748341 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:28 crc kubenswrapper[5119]: E0130 00:11:28.748539 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.748589 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:28 crc kubenswrapper[5119]: E0130 00:11:28.748747 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.821720 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.821765 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.821777 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.821799 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.821814 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.923455 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.923497 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.923508 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.923520 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:28 crc kubenswrapper[5119]: I0130 00:11:28.923532 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
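The condition={...} payload in each setters.go:618 entry is a standard Kubernetes NodeCondition serialized into the log line. A small Go sketch of pulling it back out of a captured line; the local struct just mirrors the fields visible above rather than importing the API types:

package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors the JSON shape logged by setters.go:618.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload abbreviated from one of the entries above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:28Z","lastTransitionTime":"2026-01-30T00:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}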
Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.025555 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.025595 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.025604 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.025618 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.025627 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.128122 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.128173 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.128188 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.128204 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.128216 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.230598 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.230640 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.230651 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.230663 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.230674 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.263285 5119 generic.go:358] "Generic (PLEG): container finished" podID="7be2f013-d656-48d9-b332-e66e20efa66f" containerID="113447c156e7410d3c13f84df27644d154ec0bb3b73569df5ee6b2a1409ff01f" exitCode=0
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.263375 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerDied","Data":"113447c156e7410d3c13f84df27644d154ec0bb3b73569df5ee6b2a1409ff01f"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.266807 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.333083 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.333121 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.333132 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.333145 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.333155 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.435648 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.436046 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.436062 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.436083 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.436099 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
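The kubelet.go:2569 entries above are pod lifecycle event generator (PLEG) events being fanned into the sync loop: the relist noticed an init container of multus-additional-cni-plugins-w5d5n exit (ContainerDied, exitCode=0) and an ovnkube-node-nwvqg container come up (ContainerStarted). A sketch of that event shape and dispatch, mirroring only what the log shows, not the kubelet's own package:

package main

import "fmt"

// plegEvent mirrors the event={"ID":...,"Type":...,"Data":...} payload
// logged by the sync loop; ID is the pod UID, Data the container ID.
type plegEvent struct {
	ID   string
	Type string // "ContainerStarted" | "ContainerDied" | ...
	Data string
}

func handle(e plegEvent) {
	switch e.Type {
	case "ContainerDied":
		fmt.Println("container finished, pod", e.ID, "needs a sync:", e.Data)
	case "ContainerStarted":
		fmt.Println("container running in pod", e.ID, ":", e.Data)
	}
}

func main() {
	handle(plegEvent{ID: "7be2f013-d656-48d9-b332-e66e20efa66f", Type: "ContainerDied", Data: "113447c156e7410d3c13f84df27644d154ec0bb3b73569df5ee6b2a1409ff01f"})
	handle(plegEvent{ID: "7337c888-01aa-4a6b-b494-7a51eff39634", Type: "ContainerStarted", Data: "3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"})
}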
Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.538192 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.538242 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.538254 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.538268 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.538277 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.640371 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.640433 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.640443 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.640457 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.640469 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.742369 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.742432 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.742446 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.742462 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.742474 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.748855 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.748901 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:29 crc kubenswrapper[5119]: E0130 00:11:29.748976 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
Jan 30 00:11:29 crc kubenswrapper[5119]: E0130 00:11:29.749050 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.844725 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.844797 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.844809 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.844824 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.844834 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.946884 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.946921 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.946932 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.946944 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:29 crc kubenswrapper[5119]: I0130 00:11:29.946953 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.048295 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.048350 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.048363 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.048381 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.048416 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.150248 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.150294 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.150305 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.150318 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.150330 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.252856 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.252915 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.252925 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.252937 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.253001 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.355675 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.355730 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.355745 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.355760 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.355770 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.458101 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.458147 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.458159 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.458175 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.458186 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.561220 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.561258 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.561266 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.561280 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.561290 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.664039 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.664078 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.664089 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.664107 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.664118 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.749633 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:30 crc kubenswrapper[5119]: E0130 00:11:30.749752 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.749779 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:30 crc kubenswrapper[5119]: E0130 00:11:30.749909 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.765423 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.765647 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.765775 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.765864 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.765950 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.868599 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.868638 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.868647 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.868659 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.868669 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
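The util.go:30 entries record the decision that precedes each failed sync: the kubelet found no live sandbox for the pod, so it must ask the CRI runtime to create one, and that is exactly the step that cannot proceed while NetworkReady=false. A compilable sketch of that decision under illustrative stand-in types (the real path goes through the CRI's RunPodSandbox gRPC call, not this interface):

package main

import "fmt"

type sandboxConfig struct {
	Name, Namespace, UID string
}

// runtimeService is a stand-in for the CRI runtime client.
type runtimeService interface {
	RunPodSandbox(cfg sandboxConfig) (string, error)
}

type notReadyRuntime struct{}

func (notReadyRuntime) RunPodSandbox(cfg sandboxConfig) (string, error) {
	return "", fmt.Errorf("network is not ready: NetworkReady=false")
}

func ensureSandbox(rs runtimeService, live map[string]string, cfg sandboxConfig) (string, error) {
	if id, ok := live[cfg.UID]; ok {
		return id, nil // reuse the existing sandbox
	}
	fmt.Printf("No sandbox for pod can be found. Need to start a new one: %s/%s\n", cfg.Namespace, cfg.Name)
	return rs.RunPodSandbox(cfg)
}

func main() {
	_, err := ensureSandbox(notReadyRuntime{}, map[string]string{}, sandboxConfig{
		Name: "network-check-target-fhkjl", Namespace: "openshift-network-diagnostics", UID: "17b87002-b798-480a-8e17-83053d698239",
	})
	fmt.Println("Error syncing pod, skipping:", err)
}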
Has your network provider started?"}
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.970474 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.970512 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.970524 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.970537 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:30 crc kubenswrapper[5119]: I0130 00:11:30.970545 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:30Z","lastTransitionTime":"2026-01-30T00:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.072309 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.072362 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.072374 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.072407 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.072418 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.177869 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.178126 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.178135 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.178151 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.178161 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.274094 5119 generic.go:358] "Generic (PLEG): container finished" podID="7be2f013-d656-48d9-b332-e66e20efa66f" containerID="25bfe708fc9e55a4843014bab10be7a28c6822d7d8fd4df103c34a8f1b7e4191" exitCode=0
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.274167 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerDied","Data":"25bfe708fc9e55a4843014bab10be7a28c6822d7d8fd4df103c34a8f1b7e4191"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.279274 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.279444 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.279545 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.279720 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.279810 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.349709 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.349983 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.350097 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.350215 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
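The kube-api-access-* volumes being mounted above are projected volumes, and the projected.go errors that follow fail because their ConfigMap sources (kube-root-ca.crt, openshift-service-ca.crt) are not yet registered with the kubelet's object cache. A sketch of that volume's composition using the public corev1 types; the exact source list and expiry are assumptions for illustration (the real volume also projects the downward-API namespace, omitted here):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// kubeAPIAccessVolume sketches the projected volume the reconciler is
// trying to set up: service-account token plus CA-bundle ConfigMaps.
func kubeAPIAccessVolume() corev1.Volume {
	expiry := int64(3607) // illustrative token lifetime
	return corev1.Volume{
		Name: "kube-api-access-gwt8b",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token", ExpirationSeconds: &expiry}},
					{ConfigMap: &corev1.ConfigMapProjection{LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"}}},
					{ConfigMap: &corev1.ConfigMapProjection{LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"}}},
				},
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", kubeAPIAccessVolume())
}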
30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350043 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350707 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350726 5119 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350793 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.350772485 +0000 UTC m=+103.364834944 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350849 5119 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350916 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.350899438 +0000 UTC m=+103.364961897 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350951 5119 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.350984 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.350977779 +0000 UTC m=+103.365040238 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.351104 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.351121 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.351131 5119 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.351167 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.351156764 +0000 UTC m=+103.365219223 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.384235 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.384286 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.384359 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.384384 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.384422 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.384422 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.486359 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.486388 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.486428 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.486442 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.486451 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.552462 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.552670 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.552655935 +0000 UTC m=+103.566718394 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.587840 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.587884 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.587895 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.587908 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.587917 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.653154 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.653334 5119 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.653433 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs podName:64ebdc45-679c-4414-84fa-805ed5d07898 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.653414561 +0000 UTC m=+103.667477020 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs") pod "network-metrics-daemon-8gjq7" (UID: "64ebdc45-679c-4414-84fa-805ed5d07898") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.690093 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.690141 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.690154 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.690172 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.690185 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.748746 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.748746 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.748876 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
Jan 30 00:11:31 crc kubenswrapper[5119]: E0130 00:11:31.748943 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.791795 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.791828 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.791838 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.791852 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.791861 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.893587 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.893621 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.893632 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.893647 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.893658 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.995496 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.995545 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.995559 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.995575 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:31 crc kubenswrapper[5119]: I0130 00:11:31.995587 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:31Z","lastTransitionTime":"2026-01-30T00:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.097785 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.097837 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.097891 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.097906 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.097915 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.200111 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.200156 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.200168 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.200185 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.200200 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.288199 5119 generic.go:358] "Generic (PLEG): container finished" podID="7be2f013-d656-48d9-b332-e66e20efa66f" containerID="d0c963bf7ccce27d41dc64c69e070387db73066594a28a7a4e870216fe62f064" exitCode=0
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.288309 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerDied","Data":"d0c963bf7ccce27d41dc64c69e070387db73066594a28a7a4e870216fe62f064"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.297355 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerStarted","Data":"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.298189 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.298245 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.298259 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.302525 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.302569 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.302580 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.302592 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.302601 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.341910 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.342775 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.401374 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podStartSLOduration=76.401357688 podStartE2EDuration="1m16.401357688s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:32.34951294 +0000 UTC m=+96.363575399" watchObservedRunningTime="2026-01-30 00:11:32.401357688 +0000 UTC m=+96.415420147"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.404341 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.404435 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.404455 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.404486 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.404507 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.507054 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.507119 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.507133 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.507151 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.507164 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.609782 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.609832 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.609843 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.609858 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.609869 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.711590 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.711640 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.711652 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.711669 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.711680 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.748995 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.749047 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:32 crc kubenswrapper[5119]: E0130 00:11:32.749159 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:32 crc kubenswrapper[5119]: E0130 00:11:32.749304 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.813944 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.813991 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.814001 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.814016 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.814025 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.916001 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.916040 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.916049 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.916063 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:32 crc kubenswrapper[5119]: I0130 00:11:32.916073 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:32Z","lastTransitionTime":"2026-01-30T00:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.017803 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.017846 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.017856 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.017869 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.017879 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.119380 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.119576 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.119588 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.119601 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.119610 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.222368 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.222425 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.222438 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.222454 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.222466 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.302887 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerStarted","Data":"b045eb7369a31a32c04de997a557c80ae6c87168ce5f31ebc62642e15fd78e40"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.324292 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.324753 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.324831 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.324901 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.324959 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.426955 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.427230 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.427334 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.427454 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.427624 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.529869 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.529907 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.529920 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.529934 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.529943 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.632555 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.632801 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.632875 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.632962 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.633033 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.734888 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.735197 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.735289 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.735381 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.735487 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.748288 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:33 crc kubenswrapper[5119]: E0130 00:11:33.748523 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.748290 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:33 crc kubenswrapper[5119]: E0130 00:11:33.748831 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.837570 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.837614 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.837623 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.837640 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.837649 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.940859 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.940904 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.940916 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.940931 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:33 crc kubenswrapper[5119]: I0130 00:11:33.940941 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:33Z","lastTransitionTime":"2026-01-30T00:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.043496 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.043697 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.043764 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.043843 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.043913 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.147060 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.147093 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.147103 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.147116 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.147125 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.248785 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.248836 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.248849 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.248871 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.248885 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.350789 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.350828 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.350840 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.350855 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.350866 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.452348 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.452403 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.452417 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.452431 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.452440 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.554045 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.554078 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.554087 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.554099 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.554108 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.655652 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.655690 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.655700 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.655715 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.655723 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.700552 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-8gjq7"]
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.700690 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:34 crc kubenswrapper[5119]: E0130 00:11:34.700838 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.708344 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.708384 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.708409 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.708424 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.708435 5119 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:34Z","lastTransitionTime":"2026-01-30T00:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.748045 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:34 crc kubenswrapper[5119]: E0130 00:11:34.748208 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.748214 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:34 crc kubenswrapper[5119]: E0130 00:11:34.748586 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.748776 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09"
Jan 30 00:11:34 crc kubenswrapper[5119]: E0130 00:11:34.748969 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.753677 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm"]
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.960049 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm"
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.964521 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"default-dockercfg-hqpm5\""
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.965007 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"cluster-version-operator-serving-cert\""
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.965855 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"openshift-service-ca.crt\""
Jan 30 00:11:34 crc kubenswrapper[5119]: I0130 00:11:34.976365 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"kube-root-ca.crt\""
Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.094300 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3453781e-528c-44af-9765-ebdf341173e5-etc-ssl-certs\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm"
Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.094423 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3453781e-528c-44af-9765-ebdf341173e5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm"
Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.094506 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3453781e-528c-44af-9765-ebdf341173e5-service-ca\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm"
\"kubernetes.io/projected/3453781e-528c-44af-9765-ebdf341173e5-kube-api-access\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.094601 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3453781e-528c-44af-9765-ebdf341173e5-serving-cert\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.196371 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3453781e-528c-44af-9765-ebdf341173e5-kube-api-access\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.196526 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3453781e-528c-44af-9765-ebdf341173e5-serving-cert\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.196596 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3453781e-528c-44af-9765-ebdf341173e5-etc-ssl-certs\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.196636 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3453781e-528c-44af-9765-ebdf341173e5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.196693 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3453781e-528c-44af-9765-ebdf341173e5-service-ca\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.197027 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3453781e-528c-44af-9765-ebdf341173e5-etc-ssl-certs\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.197928 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3453781e-528c-44af-9765-ebdf341173e5-service-ca\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" 
(UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.198073 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3453781e-528c-44af-9765-ebdf341173e5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.204611 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3453781e-528c-44af-9765-ebdf341173e5-serving-cert\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.225701 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3453781e-528c-44af-9765-ebdf341173e5-kube-api-access\") pod \"cluster-version-operator-7c9b9cfd6-smwhm\" (UID: \"3453781e-528c-44af-9765-ebdf341173e5\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.286975 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" Jan 30 00:11:35 crc kubenswrapper[5119]: W0130 00:11:35.303887 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3453781e_528c_44af_9765_ebdf341173e5.slice/crio-0b47b32272ded17d91525d108fc7fdf9062d82c614ec4c441e7c0ed70c926a2f WatchSource:0}: Error finding container 0b47b32272ded17d91525d108fc7fdf9062d82c614ec4c441e7c0ed70c926a2f: Status 404 returned error can't find the container with id 0b47b32272ded17d91525d108fc7fdf9062d82c614ec4c441e7c0ed70c926a2f Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.309271 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" event={"ID":"3453781e-528c-44af-9765-ebdf341173e5","Type":"ContainerStarted","Data":"0b47b32272ded17d91525d108fc7fdf9062d82c614ec4c441e7c0ed70c926a2f"} Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.691811 5119 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kubelet-serving" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.699551 5119 reflector.go:430] "Caches populated" logger="kubernetes.io/kubelet-serving" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" Jan 30 00:11:35 crc kubenswrapper[5119]: I0130 00:11:35.748465 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:35 crc kubenswrapper[5119]: E0130 00:11:35.748606 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:36 crc kubenswrapper[5119]: I0130 00:11:36.314908 5119 generic.go:358] "Generic (PLEG): container finished" podID="7be2f013-d656-48d9-b332-e66e20efa66f" containerID="b045eb7369a31a32c04de997a557c80ae6c87168ce5f31ebc62642e15fd78e40" exitCode=0 Jan 30 00:11:36 crc kubenswrapper[5119]: I0130 00:11:36.314967 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerDied","Data":"b045eb7369a31a32c04de997a557c80ae6c87168ce5f31ebc62642e15fd78e40"} Jan 30 00:11:36 crc kubenswrapper[5119]: I0130 00:11:36.755132 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:36 crc kubenswrapper[5119]: E0130 00:11:36.755611 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898" Jan 30 00:11:36 crc kubenswrapper[5119]: I0130 00:11:36.755222 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:36 crc kubenswrapper[5119]: I0130 00:11:36.755298 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:36 crc kubenswrapper[5119]: E0130 00:11:36.755824 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:36 crc kubenswrapper[5119]: E0130 00:11:36.755921 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:37 crc kubenswrapper[5119]: I0130 00:11:37.319757 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" event={"ID":"3453781e-528c-44af-9765-ebdf341173e5","Type":"ContainerStarted","Data":"a868a1fed984c30e04da611ff412fe7844fd87f7dd5dd06d328d6e62309a4c21"} Jan 30 00:11:37 crc kubenswrapper[5119]: I0130 00:11:37.748042 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:37 crc kubenswrapper[5119]: E0130 00:11:37.748266 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:37 crc kubenswrapper[5119]: I0130 00:11:37.881855 5119 reflector.go:430] "Caches populated" type="*v1.RuntimeClass" reflector="k8s.io/client-go/informers/factory.go:160" Jan 30 00:11:38 crc kubenswrapper[5119]: I0130 00:11:38.329079 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerStarted","Data":"32c09e2996dc4fa632a359de65dbd14293210841f9ce46f59fc08abe3bd55636"} Jan 30 00:11:38 crc kubenswrapper[5119]: I0130 00:11:38.748618 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:38 crc kubenswrapper[5119]: I0130 00:11:38.748669 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:38 crc kubenswrapper[5119]: I0130 00:11:38.748652 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:38 crc kubenswrapper[5119]: E0130 00:11:38.748785 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-8gjq7" podUID="64ebdc45-679c-4414-84fa-805ed5d07898" Jan 30 00:11:38 crc kubenswrapper[5119]: E0130 00:11:38.748861 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:38 crc kubenswrapper[5119]: E0130 00:11:38.748950 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.195478 5119 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeReady" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.196302 5119 kubelet_node_status.go:550] "Fast updating node status as it just became ready" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.242234 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-smwhm" podStartSLOduration=83.242188087 podStartE2EDuration="1m23.242188087s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:38.354369942 +0000 UTC m=+102.368432401" watchObservedRunningTime="2026-01-30 00:11:39.242188087 +0000 UTC m=+103.256250586" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.242916 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29495520-tvgs8"] Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.443236 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.443315 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443468 5119 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443559 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.443532635 +0000 UTC m=+119.457595094 (durationBeforeRetry 16s). 
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443559 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.443532635 +0000 UTC m=+119.457595094 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443582 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443623 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443643 5119 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443663 5119 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443723 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.443699479 +0000 UTC m=+119.457761938 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.443580 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443746 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.44373657 +0000 UTC m=+119.457799029 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.443833 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443936 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443949 5119 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443955 5119 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.443988 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.443980345 +0000 UTC m=+119.458042804 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.646307 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.646592 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.646569323 +0000 UTC m=+119.660631782 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.748507 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.748778 5119 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: E0130 00:11:39.748938 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs podName:64ebdc45-679c-4414-84fa-805ed5d07898 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.748892297 +0000 UTC m=+119.762954796 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs") pod "network-metrics-daemon-8gjq7" (UID: "64ebdc45-679c-4414-84fa-805ed5d07898") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.796825 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"]
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.796992 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.798275 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.800950 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-console\"/\"networking-console-plugin\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.800959 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.803653 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"serviceca\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.804534 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-console\"/\"networking-console-plugin-cert\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.805256 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"pruner-dockercfg-rs58m\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.805522 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.849721 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/31630771-ce1a-4418-89ce-c58d6bf3c61f-serviceca\") pod \"image-pruner-29495520-tvgs8\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.849847 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4ws9\" (UniqueName: \"kubernetes.io/projected/31630771-ce1a-4418-89ce-c58d6bf3c61f-kube-api-access-f4ws9\") pod \"image-pruner-29495520-tvgs8\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.850361 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9"] Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.850484 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.853149 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-djmfg\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.853233 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"client-ca\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.853689 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.854434 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.855040 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"config\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.855340 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.868968 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-global-ca\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.908220 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"] Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.908774 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.911813 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-config\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.911870 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-tls\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.911918 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.912674 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-sa-dockercfg-wzhvk\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.913378 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.913565 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-rbac-proxy\"" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.951073 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-client-ca\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.951783 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-f4ws9\" (UniqueName: \"kubernetes.io/projected/31630771-ce1a-4418-89ce-c58d6bf3c61f-kube-api-access-f4ws9\") pod \"image-pruner-29495520-tvgs8\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.952087 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8168819b-6cca-4680-a37d-ade6172d7778-tmp\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.952450 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/31630771-ce1a-4418-89ce-c58d6bf3c61f-serviceca\") pod \"image-pruner-29495520-tvgs8\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.952798 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fa8ee3ba-16b1-45ac-84be-6da36536cc06-machine-approver-tls\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.953111 5119 
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.953111 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa8ee3ba-16b1-45ac-84be-6da36536cc06-config\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.953430 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-config\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.953739 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8168819b-6cca-4680-a37d-ade6172d7778-serving-cert\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.954035 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr2z4\" (UniqueName: \"kubernetes.io/projected/fa8ee3ba-16b1-45ac-84be-6da36536cc06-kube-api-access-rr2z4\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.954371 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-proxy-ca-bundles\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.954679 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dd77\" (UniqueName: \"kubernetes.io/projected/8168819b-6cca-4680-a37d-ade6172d7778-kube-api-access-6dd77\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.954914 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fa8ee3ba-16b1-45ac-84be-6da36536cc06-auth-proxy-config\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.954385 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/31630771-ce1a-4418-89ce-c58d6bf3c61f-serviceca\") pod \"image-pruner-29495520-tvgs8\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " pod="openshift-image-registry/image-pruner-29495520-tvgs8"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.964367 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4"]
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.965129 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.969702 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"serving-cert\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.969864 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"config\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.970894 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"kube-root-ca.crt\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.970932 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"client-ca\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.971097 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-mmcpt\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.971234 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"openshift-service-ca.crt\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.980234 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4ws9\" (UniqueName: \"kubernetes.io/projected/31630771-ce1a-4418-89ce-c58d6bf3c61f-kube-api-access-f4ws9\") pod \"image-pruner-29495520-tvgs8\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " pod="openshift-image-registry/image-pruner-29495520-tvgs8"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.987919 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-9ddfb9f55-9n8tq"]
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.988147 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4"
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.990834 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"openshift-service-ca.crt\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.990772 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-client\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.990945 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"serving-cert\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.993406 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"audit-1\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.993420 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-serving-ca\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.993425 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"trusted-ca-bundle\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.993479 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"encryption-config-1\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.993576 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"kube-root-ca.crt\""
Jan 30 00:11:39 crc kubenswrapper[5119]: I0130 00:11:39.993766 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"oauth-apiserver-sa-dockercfg-qqw4z\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.012949 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-755bb95488-sxw6b"]
Need to start a new one" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.018661 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"etcd-client\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.019191 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"config\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.020133 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.020527 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"audit-1\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.020728 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"etcd-serving-ca\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.020791 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.020965 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-4zqgh\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.021057 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"image-import-ca\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.021164 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.021996 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"encryption-config-1\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.033471 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"trusted-ca-bundle\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056159 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6013b9-649a-4f77-a54b-c272bbbdf392-serving-cert\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056304 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-serving-cert\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056463 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-config\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056611 5119 
reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-etcd-serving-ca\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056708 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-audit-dir\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056778 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lw6h\" (UniqueName: \"kubernetes.io/projected/0c6013b9-649a-4f77-a54b-c272bbbdf392-kube-api-access-6lw6h\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056845 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-node-pullsecrets\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056915 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-serving-cert\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.056983 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-audit-policies\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057057 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-audit\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057121 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-etcd-client\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057209 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8168819b-6cca-4680-a37d-ade6172d7778-tmp\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: 
\"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057317 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x72kn\" (UniqueName: \"kubernetes.io/projected/4363b09f-d35c-47ec-b96d-c9437ccf2206-kube-api-access-x72kn\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057411 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-image-import-ca\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057540 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-encryption-config\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057614 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-encryption-config\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057694 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fa8ee3ba-16b1-45ac-84be-6da36536cc06-machine-approver-tls\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057759 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-etcd-client\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057829 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4363b09f-d35c-47ec-b96d-c9437ccf2206-audit-dir\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057913 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa8ee3ba-16b1-45ac-84be-6da36536cc06-config\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057991 5119 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-config\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058094 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8168819b-6cca-4680-a37d-ade6172d7778-serving-cert\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058202 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c6013b9-649a-4f77-a54b-c272bbbdf392-tmp\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058297 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-config\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058369 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-rr2z4\" (UniqueName: \"kubernetes.io/projected/fa8ee3ba-16b1-45ac-84be-6da36536cc06-kube-api-access-rr2z4\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058484 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-trusted-ca-bundle\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058569 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-proxy-ca-bundles\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057873 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8168819b-6cca-4680-a37d-ade6172d7778-tmp\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058037 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.057556 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2"] Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.058770 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-trusted-ca-bundle\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059322 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-client-ca\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059371 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6dd77\" (UniqueName: \"kubernetes.io/projected/8168819b-6cca-4680-a37d-ade6172d7778-kube-api-access-6dd77\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059415 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fa8ee3ba-16b1-45ac-84be-6da36536cc06-auth-proxy-config\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059484 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-client-ca\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059509 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-etcd-serving-ca\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059534 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7thdn\" (UniqueName: \"kubernetes.io/projected/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-kube-api-access-7thdn\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.059914 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-config\") pod 
\"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.060333 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-client-ca\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.060369 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-proxy-ca-bundles\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.061538 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa8ee3ba-16b1-45ac-84be-6da36536cc06-config\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.061852 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-tls\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.061996 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fa8ee3ba-16b1-45ac-84be-6da36536cc06-auth-proxy-config\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.063203 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8168819b-6cca-4680-a37d-ade6172d7778-serving-cert\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.063298 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-6n5ln\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.064053 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fa8ee3ba-16b1-45ac-84be-6da36536cc06-machine-approver-tls\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.065025 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-rbac-proxy\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.066556 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.067046 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" 
reflector="object-\"openshift-machine-api\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.073926 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-images\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.085079 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dd77\" (UniqueName: \"kubernetes.io/projected/8168819b-6cca-4680-a37d-ade6172d7778-kube-api-access-6dd77\") pod \"controller-manager-65b6cccf98-6x6hj\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.093658 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr2z4\" (UniqueName: \"kubernetes.io/projected/fa8ee3ba-16b1-45ac-84be-6da36536cc06-kube-api-access-rr2z4\") pod \"machine-approver-54c688565-wmrc9\" (UID: \"fa8ee3ba-16b1-45ac-84be-6da36536cc06\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.102771 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.102853 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-67c89758df-l9h6v"] Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.106186 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.106338 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-6c46w\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.106471 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.106581 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.106593 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.140240 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160489 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-client-ca\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160536 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/682184a8-29d6-4081-99ac-9d5989e169ab-machine-api-operator-tls\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160674 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q52w\" (UniqueName: \"kubernetes.io/projected/682184a8-29d6-4081-99ac-9d5989e169ab-kube-api-access-2q52w\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160721 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-etcd-serving-ca\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160744 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7thdn\" (UniqueName: \"kubernetes.io/projected/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-kube-api-access-7thdn\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160768 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6013b9-649a-4f77-a54b-c272bbbdf392-serving-cert\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160787 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-serving-cert\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.160803 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-config\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161032 5119 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-etcd-serving-ca\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161082 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-audit-dir\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161116 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6lw6h\" (UniqueName: \"kubernetes.io/projected/0c6013b9-649a-4f77-a54b-c272bbbdf392-kube-api-access-6lw6h\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161143 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-node-pullsecrets\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161161 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-serving-cert\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161182 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-audit-policies\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161205 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-audit\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161222 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-etcd-client\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161245 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-x72kn\" (UniqueName: \"kubernetes.io/projected/4363b09f-d35c-47ec-b96d-c9437ccf2206-kube-api-access-x72kn\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc 
kubenswrapper[5119]: I0130 00:11:40.161260 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-image-import-ca\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161263 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-audit-dir\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161304 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/682184a8-29d6-4081-99ac-9d5989e169ab-config\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.161418 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-node-pullsecrets\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.162730 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-etcd-serving-ca\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.162737 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-etcd-serving-ca\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163348 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-encryption-config\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163443 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-encryption-config\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163487 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53c88ff0-d29e-4cb6-8904-e1e203848f51-config\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163527 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrdxs\" (UniqueName: \"kubernetes.io/projected/53c88ff0-d29e-4cb6-8904-e1e203848f51-kube-api-access-nrdxs\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163561 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-etcd-client\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163588 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4363b09f-d35c-47ec-b96d-c9437ccf2206-audit-dir\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163669 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4363b09f-d35c-47ec-b96d-c9437ccf2206-audit-dir\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163725 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c6013b9-649a-4f77-a54b-c272bbbdf392-tmp\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163770 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-config\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163795 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-trusted-ca-bundle\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163822 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/682184a8-29d6-4081-99ac-9d5989e169ab-images\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163856 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/53c88ff0-d29e-4cb6-8904-e1e203848f51-serving-cert\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.163904 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-trusted-ca-bundle\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164018 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-client-ca\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164061 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-config\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164116 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-audit-policies\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164258 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-audit\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164545 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4363b09f-d35c-47ec-b96d-c9437ccf2206-trusted-ca-bundle\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164902 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-config\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.164992 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c6013b9-649a-4f77-a54b-c272bbbdf392-tmp\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.167288 5119 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6013b9-649a-4f77-a54b-c272bbbdf392-serving-cert\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.170028 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-image-import-ca\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.171886 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-trusted-ca-bundle\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.171906 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-serving-cert\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.172764 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-encryption-config\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.173700 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.175260 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4363b09f-d35c-47ec-b96d-c9437ccf2206-etcd-client\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.182770 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7thdn\" (UniqueName: \"kubernetes.io/projected/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-kube-api-access-7thdn\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.184932 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-x72kn\" (UniqueName: \"kubernetes.io/projected/4363b09f-d35c-47ec-b96d-c9437ccf2206-kube-api-access-x72kn\") pod \"apiserver-8596bd845d-l5nr4\" (UID: \"4363b09f-d35c-47ec-b96d-c9437ccf2206\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.187286 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lw6h\" (UniqueName: \"kubernetes.io/projected/0c6013b9-649a-4f77-a54b-c272bbbdf392-kube-api-access-6lw6h\") pod \"route-controller-manager-776cdc94d6-8sggv\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.194465 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-console/console-64d44f6ddf-fjtqb"] Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.195100 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.198505 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.199035 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"console-operator-config\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.200341 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.200351 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.200929 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-serving-cert\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.201290 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-encryption-config\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.201548 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"console-operator-dockercfg-kl6m8\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.203197 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4e4d65fd-a484-4711-a91f-cd04e6dfa00a-etcd-client\") pod \"apiserver-9ddfb9f55-9n8tq\" (UID: \"4e4d65fd-a484-4711-a91f-cd04e6dfa00a\") " pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.210371 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"trusted-ca\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.236855 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.239765 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-5777786469-7wrgd"] Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.240067 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.244153 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"service-ca\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.244216 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"console-config\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.244411 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.244493 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-oauth-config\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.244541 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.244924 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"oauth-serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.245172 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-dockercfg-8dkm8\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.246265 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.250536 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa8ee3ba_16b1_45ac_84be_6da36536cc06.slice/crio-34c091a149315adc5bb0edff65840533378cd4ad3bffa7f28eaafc01e9c40f72 WatchSource:0}: Error finding container 34c091a149315adc5bb0edff65840533378cd4ad3bffa7f28eaafc01e9c40f72: Status 404 returned error can't find the container with id 34c091a149315adc5bb0edff65840533378cd4ad3bffa7f28eaafc01e9c40f72 Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.251241 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"trusted-ca-bundle\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265195 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b0f3995-a41a-43fe-9811-2a31b2e34c81-serving-cert\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265706 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-trusted-ca-bundle\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265733 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b0f3995-a41a-43fe-9811-2a31b2e34c81-trusted-ca\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " 
pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265754 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-oauth-serving-cert\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265789 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/682184a8-29d6-4081-99ac-9d5989e169ab-config\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265815 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsvx4\" (UniqueName: \"kubernetes.io/projected/80f4b56b-65a5-40d2-9a12-0465c0ff492c-kube-api-access-qsvx4\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265837 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53c88ff0-d29e-4cb6-8904-e1e203848f51-config\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265855 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nrdxs\" (UniqueName: \"kubernetes.io/projected/53c88ff0-d29e-4cb6-8904-e1e203848f51-kube-api-access-nrdxs\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265873 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-serving-cert\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265891 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-service-ca\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265933 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-oauth-config\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265952 5119 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tgwl\" (UniqueName: \"kubernetes.io/projected/4b0f3995-a41a-43fe-9811-2a31b2e34c81-kube-api-access-2tgwl\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265974 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/682184a8-29d6-4081-99ac-9d5989e169ab-images\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.265993 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53c88ff0-d29e-4cb6-8904-e1e203848f51-serving-cert\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.266022 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/682184a8-29d6-4081-99ac-9d5989e169ab-machine-api-operator-tls\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.266047 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2q52w\" (UniqueName: \"kubernetes.io/projected/682184a8-29d6-4081-99ac-9d5989e169ab-kube-api-access-2q52w\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.266074 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-config\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.266103 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b0f3995-a41a-43fe-9811-2a31b2e34c81-config\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.266976 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/682184a8-29d6-4081-99ac-9d5989e169ab-config\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.267525 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53c88ff0-d29e-4cb6-8904-e1e203848f51-config\") pod 
\"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.268642 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/682184a8-29d6-4081-99ac-9d5989e169ab-images\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.277994 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/682184a8-29d6-4081-99ac-9d5989e169ab-machine-api-operator-tls\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.285285 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53c88ff0-d29e-4cb6-8904-e1e203848f51-serving-cert\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.286809 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrdxs\" (UniqueName: \"kubernetes.io/projected/53c88ff0-d29e-4cb6-8904-e1e203848f51-kube-api-access-nrdxs\") pod \"openshift-apiserver-operator-846cbfc458-nk2l2\" (UID: \"53c88ff0-d29e-4cb6-8904-e1e203848f51\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.300251 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q52w\" (UniqueName: \"kubernetes.io/projected/682184a8-29d6-4081-99ac-9d5989e169ab-kube-api-access-2q52w\") pod \"machine-api-operator-755bb95488-sxw6b\" (UID: \"682184a8-29d6-4081-99ac-9d5989e169ab\") " pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.324576 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.333461 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.349543 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.351406 5119 generic.go:358] "Generic (PLEG): container finished" podID="7be2f013-d656-48d9-b332-e66e20efa66f" containerID="32c09e2996dc4fa632a359de65dbd14293210841f9ce46f59fc08abe3bd55636" exitCode=0 Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367578 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b0f3995-a41a-43fe-9811-2a31b2e34c81-serving-cert\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367624 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-trusted-ca-bundle\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367645 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b0f3995-a41a-43fe-9811-2a31b2e34c81-trusted-ca\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367673 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-oauth-serving-cert\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367710 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-qsvx4\" (UniqueName: \"kubernetes.io/projected/80f4b56b-65a5-40d2-9a12-0465c0ff492c-kube-api-access-qsvx4\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367730 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-serving-cert\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367748 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-service-ca\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367800 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-oauth-config\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc 
kubenswrapper[5119]: I0130 00:11:40.367817 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2tgwl\" (UniqueName: \"kubernetes.io/projected/4b0f3995-a41a-43fe-9811-2a31b2e34c81-kube-api-access-2tgwl\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367865 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-config\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.367889 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b0f3995-a41a-43fe-9811-2a31b2e34c81-config\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.368895 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b0f3995-a41a-43fe-9811-2a31b2e34c81-config\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.371214 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b0f3995-a41a-43fe-9811-2a31b2e34c81-trusted-ca\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.372028 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-config\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.372427 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-oauth-serving-cert\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.372817 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-service-ca\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.376052 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-serving-cert\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc 
kubenswrapper[5119]: I0130 00:11:40.376460 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80f4b56b-65a5-40d2-9a12-0465c0ff492c-trusted-ca-bundle\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.379780 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/80f4b56b-65a5-40d2-9a12-0465c0ff492c-console-oauth-config\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.384959 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b0f3995-a41a-43fe-9811-2a31b2e34c81-serving-cert\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.385014 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsvx4\" (UniqueName: \"kubernetes.io/projected/80f4b56b-65a5-40d2-9a12-0465c0ff492c-kube-api-access-qsvx4\") pod \"console-64d44f6ddf-fjtqb\" (UID: \"80f4b56b-65a5-40d2-9a12-0465c0ff492c\") " pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.385199 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.392122 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tgwl\" (UniqueName: \"kubernetes.io/projected/4b0f3995-a41a-43fe-9811-2a31b2e34c81-kube-api-access-2tgwl\") pod \"console-operator-67c89758df-l9h6v\" (UID: \"4b0f3995-a41a-43fe-9811-2a31b2e34c81\") " pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.400473 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc"] Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.400687 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.405290 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.406046 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.406724 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"config-operator-serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.406977 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-sjn6s\"" Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.418143 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8168819b_6cca_4680_a37d_ade6172d7778.slice/crio-499a856683a97ce279a85b1f1235dd9123c3cce8869f3cf44dc5df6029257e48 WatchSource:0}: Error finding container 499a856683a97ce279a85b1f1235dd9123c3cce8869f3cf44dc5df6029257e48: Status 404 returned error can't find the container with id 499a856683a97ce279a85b1f1235dd9123c3cce8869f3cf44dc5df6029257e48 Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.419126 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.468980 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c423b388-bcb0-40a3-9889-9ec109779849-available-featuregates\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.469024 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c423b388-bcb0-40a3-9889-9ec109779849-serving-cert\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.469272 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n54nr\" (UniqueName: \"kubernetes.io/projected/c423b388-bcb0-40a3-9889-9ec109779849-kube-api-access-n54nr\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.512493 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-67c89758df-l9h6v"
Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.555364 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c6013b9_649a_4f77_a54b_c272bbbdf392.slice/crio-827e1a6a18163f6bdb6ebd244ba8446ccd09192417032b44fa5f239660fcbb52 WatchSource:0}: Error finding container 827e1a6a18163f6bdb6ebd244ba8446ccd09192417032b44fa5f239660fcbb52: Status 404 returned error can't find the container with id 827e1a6a18163f6bdb6ebd244ba8446ccd09192417032b44fa5f239660fcbb52
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.558767 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64d44f6ddf-fjtqb"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.569992 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c423b388-bcb0-40a3-9889-9ec109779849-serving-cert\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.570113 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-n54nr\" (UniqueName: \"kubernetes.io/projected/c423b388-bcb0-40a3-9889-9ec109779849-kube-api-access-n54nr\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.570156 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c423b388-bcb0-40a3-9889-9ec109779849-available-featuregates\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.570560 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c423b388-bcb0-40a3-9889-9ec109779849-available-featuregates\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.586113 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c423b388-bcb0-40a3-9889-9ec109779849-serving-cert\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.593629 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-n54nr\" (UniqueName: \"kubernetes.io/projected/c423b388-bcb0-40a3-9889-9ec109779849-kube-api-access-n54nr\") pod \"openshift-config-operator-5777786469-7wrgd\" (UID: \"c423b388-bcb0-40a3-9889-9ec109779849\") " pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.627628 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod682184a8_29d6_4081_99ac_9d5989e169ab.slice/crio-cf951f88577db5222dd55f6a5b88ccb8b271528c17e801fe57a3389f62615042 WatchSource:0}: Error finding container cf951f88577db5222dd55f6a5b88ccb8b271528c17e801fe57a3389f62615042: Status 404 returned error can't find the container with id cf951f88577db5222dd55f6a5b88ccb8b271528c17e801fe57a3389f62615042
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.732051 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.782725 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7"
Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.782716 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53c88ff0_d29e_4cb6_8904_e1e203848f51.slice/crio-e31934ab371dbf6e732ee4a847077810ba5f4f86b2eba2f3f063528216805fce WatchSource:0}: Error finding container e31934ab371dbf6e732ee4a847077810ba5f4f86b2eba2f3f063528216805fce: Status 404 returned error can't find the container with id e31934ab371dbf6e732ee4a847077810ba5f4f86b2eba2f3f063528216805fce
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.782746 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.782826 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.787727 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-secret\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.789465 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"kube-root-ca.crt\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.794812 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"openshift-service-ca.crt\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.797179 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.797252 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.798294 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"pprof-cert\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.799124 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-sa-dockercfg-t8n29\""
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.815458 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerDied","Data":"32c09e2996dc4fa632a359de65dbd14293210841f9ce46f59fc08abe3bd55636"}
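
The W0130 manager.go:1169 warnings above come from cadvisor's cgroup watcher racing freshly created CRI-O containers: the watch event for a new crio-<id> cgroup is processed before the container is known, so the lookup returns 404. On a node that is still booting (every pod here is also logging "No sandbox for pod can be found"), such 404s are typically transient startup noise rather than failures. Below is a minimal triage sketch in Go; it is a hypothetical helper, not part of the kubelet, and it assumes one log record per line (as reflowed above). It tallies these warnings per container ID so a genuinely stuck container would stand out from one-off races.

    // watchwarn.go: count "Failed to process watch event ... Status 404"
    // warnings per container ID in a kubelet log read from stdin.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "strings"
    )

    func main() {
        // The 64-hex-digit ID at the end of "can't find the container with id <id>".
        re := regexp.MustCompile(`can't find the container with id ([0-9a-f]{64})`)
        counts := map[string]int{}

        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet records can be long
        for sc.Scan() {
            line := sc.Text()
            if !strings.Contains(line, "Failed to process watch event") {
                continue
            }
            if m := re.FindStringSubmatch(line); m != nil {
                counts[m[1]]++
            }
        }
        if err := sc.Err(); err != nil {
            fmt.Fprintln(os.Stderr, "scan:", err)
            os.Exit(1)
        }
        for id, n := range counts {
            fmt.Printf("%4d  %s\n", n, id[:12]) // short ID prefix is enough to grep for
        }
    }

Run against the decompressed log, e.g. "zcat kubelet.log.gz | go run watchwarn.go" (filename assumed from this artifact). A container ID that keeps accumulating 404s after startup settles deserves a closer look; single hits like the two above can usually be ignored.
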
Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.815534 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-69b85846b6-x62sl"] Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.831645 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b0f3995_a41a_43fe_9811_2a31b2e34c81.slice/crio-9498db5f168de93f2c79bf455b3a9e885581b25d32109f91cb17a2af14d213a4 WatchSource:0}: Error finding container 9498db5f168de93f2c79bf455b3a9e885581b25d32109f91cb17a2af14d213a4: Status 404 returned error can't find the container with id 9498db5f168de93f2c79bf455b3a9e885581b25d32109f91cb17a2af14d213a4 Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.878869 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8304fa9-6220-4bf9-a154-177628944fc1-secret-volume\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.878940 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8304fa9-6220-4bf9-a154-177628944fc1-config-volume\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.878963 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klllr\" (UniqueName: \"kubernetes.io/projected/f8304fa9-6220-4bf9-a154-177628944fc1-kube-api-access-klllr\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: W0130 00:11:40.938563 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc423b388_bcb0_40a3_9889_9ec109779849.slice/crio-409791d43e65f31fa7dcb59f3081d2cc1e6c74386579a436373d8e90e1a8ac25 WatchSource:0}: Error finding container 409791d43e65f31fa7dcb59f3081d2cc1e6c74386579a436373d8e90e1a8ac25: Status 404 returned error can't find the container with id 409791d43e65f31fa7dcb59f3081d2cc1e6c74386579a436373d8e90e1a8ac25 Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.968814 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.969464 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26"] Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.972635 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-config\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.972965 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-ca-bundle\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.973152 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.974163 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-client\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.974403 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-service-ca-bundle\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.974455 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.974552 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-dockercfg-4vdnc\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.974700 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.980088 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8304fa9-6220-4bf9-a154-177628944fc1-config-volume\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.980132 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-klllr\" (UniqueName: \"kubernetes.io/projected/f8304fa9-6220-4bf9-a154-177628944fc1-kube-api-access-klllr\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.980207 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8304fa9-6220-4bf9-a154-177628944fc1-secret-volume\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.981486 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8304fa9-6220-4bf9-a154-177628944fc1-config-volume\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 
00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.989191 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8304fa9-6220-4bf9-a154-177628944fc1-secret-volume\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:40 crc kubenswrapper[5119]: I0130 00:11:40.997472 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-klllr\" (UniqueName: \"kubernetes.io/projected/f8304fa9-6220-4bf9-a154-177628944fc1-kube-api-access-klllr\") pod \"collect-profiles-29495520-8cltc\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.048549 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" event={"ID":"fa8ee3ba-16b1-45ac-84be-6da36536cc06","Type":"ContainerStarted","Data":"34c091a149315adc5bb0edff65840533378cd4ad3bffa7f28eaafc01e9c40f72"} Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.048610 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7"] Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.048630 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.054198 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-6tbpn\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.054272 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.054936 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.055419 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"service-ca-bundle\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.058585 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-config\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.059544 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-747b44746d-nts9m"] Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.060139 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.060729 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.060909 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.062189 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.062524 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"samples-operator-tls\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.062731 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.068893 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"cluster-samples-operator-dockercfg-jmhxf\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.081805 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f65bd14d-60b5-4288-9fa3-6322881a4015-serving-cert\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082003 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-service-ca-bundle\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082111 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-config\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082265 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-service-ca\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082374 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1469b0b-e1a0-4898-ba56-76ae9cce8867-serving-cert\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " 
pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082480 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmht8\" (UniqueName: \"kubernetes.io/projected/d1469b0b-e1a0-4898-ba56-76ae9cce8867-kube-api-access-nmht8\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082604 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-ca\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082749 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-client\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.082908 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mr2g\" (UniqueName: \"kubernetes.io/projected/f65bd14d-60b5-4288-9fa3-6322881a4015-kube-api-access-5mr2g\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.083017 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-trusted-ca-bundle\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.083134 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65bd14d-60b5-4288-9fa3-6322881a4015-tmp-dir\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.083228 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-config\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.150228 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.168487 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb"] Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.183911 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/67a37ab3-c501-4e8d-96f2-907b849e3856-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-rqpj7\" (UID: \"67a37ab3-c501-4e8d-96f2-907b849e3856\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.183961 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-config\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.183981 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-service-ca\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.184008 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1469b0b-e1a0-4898-ba56-76ae9cce8867-serving-cert\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.184026 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nmht8\" (UniqueName: \"kubernetes.io/projected/d1469b0b-e1a0-4898-ba56-76ae9cce8867-kube-api-access-nmht8\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.184043 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-ca\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.184060 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-client\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185060 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-config\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: 
\"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185221 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5mr2g\" (UniqueName: \"kubernetes.io/projected/f65bd14d-60b5-4288-9fa3-6322881a4015-kube-api-access-5mr2g\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185291 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h84k\" (UniqueName: \"kubernetes.io/projected/67a37ab3-c501-4e8d-96f2-907b849e3856-kube-api-access-6h84k\") pod \"cluster-samples-operator-6b564684c8-rqpj7\" (UID: \"67a37ab3-c501-4e8d-96f2-907b849e3856\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185362 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-trusted-ca-bundle\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185486 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65bd14d-60b5-4288-9fa3-6322881a4015-tmp-dir\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185538 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-config\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185555 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-ca\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185571 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f65bd14d-60b5-4288-9fa3-6322881a4015-serving-cert\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185642 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-service-ca-bundle\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.185869 5119 
operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65bd14d-60b5-4288-9fa3-6322881a4015-tmp-dir\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.186334 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-service-ca-bundle\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.186954 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1469b0b-e1a0-4898-ba56-76ae9cce8867-trusted-ca-bundle\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.187044 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-config\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.187427 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-service-ca\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.194218 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f65bd14d-60b5-4288-9fa3-6322881a4015-etcd-client\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.195272 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f65bd14d-60b5-4288-9fa3-6322881a4015-serving-cert\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.196186 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1469b0b-e1a0-4898-ba56-76ae9cce8867-serving-cert\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.202798 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmht8\" (UniqueName: \"kubernetes.io/projected/d1469b0b-e1a0-4898-ba56-76ae9cce8867-kube-api-access-nmht8\") pod \"authentication-operator-7f5c659b84-xnr26\" (UID: \"d1469b0b-e1a0-4898-ba56-76ae9cce8867\") " 
pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.208965 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mr2g\" (UniqueName: \"kubernetes.io/projected/f65bd14d-60b5-4288-9fa3-6322881a4015-kube-api-access-5mr2g\") pod \"etcd-operator-69b85846b6-x62sl\" (UID: \"f65bd14d-60b5-4288-9fa3-6322881a4015\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.290407 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8ps7\" (UniqueName: \"kubernetes.io/projected/ff01653b-8f2a-47a1-ae0c-0ac878c25570-kube-api-access-j8ps7\") pod \"downloads-747b44746d-nts9m\" (UID: \"ff01653b-8f2a-47a1-ae0c-0ac878c25570\") " pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.290465 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6h84k\" (UniqueName: \"kubernetes.io/projected/67a37ab3-c501-4e8d-96f2-907b849e3856-kube-api-access-6h84k\") pod \"cluster-samples-operator-6b564684c8-rqpj7\" (UID: \"67a37ab3-c501-4e8d-96f2-907b849e3856\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.290542 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/67a37ab3-c501-4e8d-96f2-907b849e3856-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-rqpj7\" (UID: \"67a37ab3-c501-4e8d-96f2-907b849e3856\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.304692 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/67a37ab3-c501-4e8d-96f2-907b849e3856-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-rqpj7\" (UID: \"67a37ab3-c501-4e8d-96f2-907b849e3856\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.312295 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h84k\" (UniqueName: \"kubernetes.io/projected/67a37ab3-c501-4e8d-96f2-907b849e3856-kube-api-access-6h84k\") pod \"cluster-samples-operator-6b564684c8-rqpj7\" (UID: \"67a37ab3-c501-4e8d-96f2-907b849e3856\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: W0130 00:11:41.355924 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8304fa9_6220_4bf9_a154_177628944fc1.slice/crio-14a6018aca423eac1c27c23f9f0b95f486d8a25e88951f032246a1c32e74dd9a WatchSource:0}: Error finding container 14a6018aca423eac1c27c23f9f0b95f486d8a25e88951f032246a1c32e74dd9a: Status 404 returned error can't find the container with id 14a6018aca423eac1c27c23f9f0b95f486d8a25e88951f032246a1c32e74dd9a Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.385639 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.392113 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-j8ps7\" (UniqueName: \"kubernetes.io/projected/ff01653b-8f2a-47a1-ae0c-0ac878c25570-kube-api-access-j8ps7\") pod \"downloads-747b44746d-nts9m\" (UID: \"ff01653b-8f2a-47a1-ae0c-0ac878c25570\") " pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.393036 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.399459 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.409845 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8ps7\" (UniqueName: \"kubernetes.io/projected/ff01653b-8f2a-47a1-ae0c-0ac878c25570-kube-api-access-j8ps7\") pod \"downloads-747b44746d-nts9m\" (UID: \"ff01653b-8f2a-47a1-ae0c-0ac878c25570\") " pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.435523 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" event={"ID":"4b0f3995-a41a-43fe-9811-2a31b2e34c81","Type":"ContainerStarted","Data":"9498db5f168de93f2c79bf455b3a9e885581b25d32109f91cb17a2af14d213a4"} Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.435575 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" event={"ID":"53c88ff0-d29e-4cb6-8904-e1e203848f51","Type":"ContainerStarted","Data":"e31934ab371dbf6e732ee4a847077810ba5f4f86b2eba2f3f063528216805fce"} Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.435589 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" event={"ID":"4363b09f-d35c-47ec-b96d-c9437ccf2206","Type":"ContainerStarted","Data":"13aaf0ee2d412eeff28dc68533f553ba711633e07fafd7b71321271c4611cada"} Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.435605 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"] Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.435687 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.435930 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.440592 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-operator-tls\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.441041 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-ntnd7\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.442572 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"default-dockercfg-mdwwj\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.447512 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"trusted-ca\"" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.469770 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.494115 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/27037d8c-3db2-4e66-8680-7804a89fd519-bound-sa-token\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.494161 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/27037d8c-3db2-4e66-8680-7804a89fd519-ca-trust-extracted-pem\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.494237 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/27037d8c-3db2-4e66-8680-7804a89fd519-image-registry-operator-tls\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.494281 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/27037d8c-3db2-4e66-8680-7804a89fd519-tmp\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.494303 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27037d8c-3db2-4e66-8680-7804a89fd519-trusted-ca\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.494320 5119 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc7v5\" (UniqueName: \"kubernetes.io/projected/27037d8c-3db2-4e66-8680-7804a89fd519-kube-api-access-vc7v5\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.597131 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/27037d8c-3db2-4e66-8680-7804a89fd519-bound-sa-token\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.597177 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/27037d8c-3db2-4e66-8680-7804a89fd519-ca-trust-extracted-pem\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.597261 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/27037d8c-3db2-4e66-8680-7804a89fd519-image-registry-operator-tls\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.597297 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/27037d8c-3db2-4e66-8680-7804a89fd519-tmp\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.597323 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27037d8c-3db2-4e66-8680-7804a89fd519-trusted-ca\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.597370 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vc7v5\" (UniqueName: \"kubernetes.io/projected/27037d8c-3db2-4e66-8680-7804a89fd519-kube-api-access-vc7v5\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.598574 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/27037d8c-3db2-4e66-8680-7804a89fd519-ca-trust-extracted-pem\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc 
kubenswrapper[5119]: I0130 00:11:41.599521 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/27037d8c-3db2-4e66-8680-7804a89fd519-tmp\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.600652 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27037d8c-3db2-4e66-8680-7804a89fd519-trusted-ca\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.605529 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/27037d8c-3db2-4e66-8680-7804a89fd519-image-registry-operator-tls\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.622562 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc7v5\" (UniqueName: \"kubernetes.io/projected/27037d8c-3db2-4e66-8680-7804a89fd519-kube-api-access-vc7v5\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.629659 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/27037d8c-3db2-4e66-8680-7804a89fd519-bound-sa-token\") pod \"cluster-image-registry-operator-86c45576b9-fp4qb\" (UID: \"27037d8c-3db2-4e66-8680-7804a89fd519\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" Jan 30 00:11:41 crc kubenswrapper[5119]: W0130 00:11:41.779713 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff01653b_8f2a_47a1_ae0c_0ac878c25570.slice/crio-e23d8a3af5e038e0c13cd66aa8950397c08ee9eea75e43c1b5cb6816cc7e1798 WatchSource:0}: Error finding container e23d8a3af5e038e0c13cd66aa8950397c08ee9eea75e43c1b5cb6816cc7e1798: Status 404 returned error can't find the container with id e23d8a3af5e038e0c13cd66aa8950397c08ee9eea75e43c1b5cb6816cc7e1798 Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.786150 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb"
Jan 30 00:11:41 crc kubenswrapper[5119]: I0130 00:11:41.930937 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db"]
Jan 30 00:11:41 crc kubenswrapper[5119]: W0130 00:11:41.994541 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27037d8c_3db2_4e66_8680_7804a89fd519.slice/crio-b35526e5b92f62938023f337933ff46e4222f228282bef465dd08ec56a51da49 WatchSource:0}: Error finding container b35526e5b92f62938023f337933ff46e4222f228282bef465dd08ec56a51da49: Status 404 returned error can't find the container with id b35526e5b92f62938023f337933ff46e4222f228282bef465dd08ec56a51da49
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.004165 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/16e266d0-7218-4013-a778-df876d9f8270-mcc-auth-proxy-config\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.004223 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/16e266d0-7218-4013-a778-df876d9f8270-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.004256 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghmhp\" (UniqueName: \"kubernetes.io/projected/16e266d0-7218-4013-a778-df876d9f8270-kube-api-access-ghmhp\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.105950 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/16e266d0-7218-4013-a778-df876d9f8270-mcc-auth-proxy-config\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.106869 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/16e266d0-7218-4013-a778-df876d9f8270-mcc-auth-proxy-config\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.106946 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/16e266d0-7218-4013-a778-df876d9f8270-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: E0130 00:11:42.107063 5119 secret.go:189] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Jan 30 00:11:42 crc kubenswrapper[5119]: E0130 00:11:42.107113 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/16e266d0-7218-4013-a778-df876d9f8270-proxy-tls podName:16e266d0-7218-4013-a778-df876d9f8270 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.607100622 +0000 UTC m=+106.621163081 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/16e266d0-7218-4013-a778-df876d9f8270-proxy-tls") pod "machine-config-controller-f9cdd68f7-9mml5" (UID: "16e266d0-7218-4013-a778-df876d9f8270") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.125303 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghmhp\" (UniqueName: \"kubernetes.io/projected/16e266d0-7218-4013-a778-df876d9f8270-kube-api-access-ghmhp\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.240248 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-c5xkv"]
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.240305 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"
Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.240327 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.243433 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mcc-proxy-tls\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.243697 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-dockercfg-jcmfj\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.244214 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-config\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.244359 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-serving-cert\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.244376 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.244608 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-controller-dockercfg-xnj77\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.247675 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.309695 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e5ee6c-a546-4a02-84ad-b736baa67181-config\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.309768 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2qgd\" (UniqueName: \"kubernetes.io/projected/67e5ee6c-a546-4a02-84ad-b736baa67181-kube-api-access-c2qgd\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.309872 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/67e5ee6c-a546-4a02-84ad-b736baa67181-tmp\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.310231 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67e5ee6c-a546-4a02-84ad-b736baa67181-serving-cert\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.383767 5119 generic.go:358] "Generic (PLEG): container finished" podID="4363b09f-d35c-47ec-b96d-c9437ccf2206" containerID="a426c347164125304d2bbf0fc4c2cdd8f387f9f6a1e4f4b07a89acbb2dd08d7d" exitCode=0 Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.411827 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/67e5ee6c-a546-4a02-84ad-b736baa67181-tmp\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.411920 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67e5ee6c-a546-4a02-84ad-b736baa67181-serving-cert\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.412013 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e5ee6c-a546-4a02-84ad-b736baa67181-config\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.412043 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-c2qgd\" (UniqueName: \"kubernetes.io/projected/67e5ee6c-a546-4a02-84ad-b736baa67181-kube-api-access-c2qgd\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.413171 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e5ee6c-a546-4a02-84ad-b736baa67181-config\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.413438 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/67e5ee6c-a546-4a02-84ad-b736baa67181-tmp\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.416617 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67e5ee6c-a546-4a02-84ad-b736baa67181-serving-cert\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 
crc kubenswrapper[5119]: I0130 00:11:42.433350 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2qgd\" (UniqueName: \"kubernetes.io/projected/67e5ee6c-a546-4a02-84ad-b736baa67181-kube-api-access-c2qgd\") pod \"openshift-controller-manager-operator-686468bdd5-r97db\" (UID: \"67e5ee6c-a546-4a02-84ad-b736baa67181\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.570890 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.613953 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/16e266d0-7218-4013-a778-df876d9f8270-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.618654 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/16e266d0-7218-4013-a778-df876d9f8270-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-9mml5\" (UID: \"16e266d0-7218-4013-a778-df876d9f8270\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.739792 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb"] Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.740067 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.743586 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"installation-pull-secrets\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.743742 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"registry-dockercfg-6w67b\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.745268 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-tls\"" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817122 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-installation-pull-secrets\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817161 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv8f7\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-kube-api-access-pv8f7\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817202 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-certificates\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817276 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-ca-trust-extracted\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817331 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817352 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-tls\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817404 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-trusted-ca\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.817424 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-bound-sa-token\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: E0130 00:11:42.819201 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.319186436 +0000 UTC m=+107.333248895 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.861777 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.918620 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5119]: E0130 00:11:42.918839 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.418816605 +0000 UTC m=+107.432879074 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.918879 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-installation-pull-secrets\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.918911 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-pv8f7\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-kube-api-access-pv8f7\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.918942 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-certificates\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.919380 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-ca-trust-extracted\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.919440 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.919466 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-tls\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.919530 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-trusted-ca\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.919555 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume 
\"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-bound-sa-token\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.920344 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-ca-trust-extracted\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: E0130 00:11:42.921777 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.421756016 +0000 UTC m=+107.435818545 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.921951 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-trusted-ca\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.922664 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-certificates\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.925164 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-installation-pull-secrets\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.925447 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-tls\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 crc kubenswrapper[5119]: I0130 00:11:42.938262 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv8f7\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-kube-api-access-pv8f7\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:42 
crc kubenswrapper[5119]: I0130 00:11:42.939022 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-bound-sa-token\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.020295 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.020459 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.520442491 +0000 UTC m=+107.534504950 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.020555 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.020811 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.52080469 +0000 UTC m=+107.534867139 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.081617 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" event={"ID":"fa8ee3ba-16b1-45ac-84be-6da36536cc06","Type":"ContainerStarted","Data":"a4e121d5d13eae28d5306494df816e067c7f9a0d5a65a2941dfd2ad0c5721b76"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.081917 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" event={"ID":"0c6013b9-649a-4f77-a54b-c272bbbdf392","Type":"ContainerStarted","Data":"827e1a6a18163f6bdb6ebd244ba8446ccd09192417032b44fa5f239660fcbb52"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.081834 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.081936 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645"] Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.084775 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.085274 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-config\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.085473 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-serving-cert\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.085686 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-dockercfg-bf7fj\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.122133 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.122319 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1758db3d-11c0-41e8-b35c-d9d296ef3b54-config\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.122346 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1758db3d-11c0-41e8-b35c-d9d296ef3b54-kube-api-access\") pod 
\"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.122385 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1758db3d-11c0-41e8-b35c-d9d296ef3b54-serving-cert\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.122441 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1758db3d-11c0-41e8-b35c-d9d296ef3b54-tmp-dir\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.122602 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.62256735 +0000 UTC m=+107.636629809 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.148035 5119 patch_prober.go:28] interesting pod/route-controller-manager-776cdc94d6-8sggv container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.148097 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.155348 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.158365 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df"] Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.156700 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.163945 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.164742 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"ingress-operator-dockercfg-74nwh\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.166162 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.166672 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"metrics-tls\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.170074 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"trusted-ca\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.223664 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2888bde6-bdde-4277-b478-2557c52cd1e2-bound-sa-token\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.223731 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1758db3d-11c0-41e8-b35c-d9d296ef3b54-config\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.223757 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1758db3d-11c0-41e8-b35c-d9d296ef3b54-kube-api-access\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.223825 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1758db3d-11c0-41e8-b35c-d9d296ef3b54-serving-cert\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.223866 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1758db3d-11c0-41e8-b35c-d9d296ef3b54-tmp-dir\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.224148 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2888bde6-bdde-4277-b478-2557c52cd1e2-trusted-ca\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: 
\"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.224185 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wphzq\" (UniqueName: \"kubernetes.io/projected/2888bde6-bdde-4277-b478-2557c52cd1e2-kube-api-access-wphzq\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.224211 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2888bde6-bdde-4277-b478-2557c52cd1e2-metrics-tls\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.224284 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.225215 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.725203532 +0000 UTC m=+107.739265991 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.225567 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1758db3d-11c0-41e8-b35c-d9d296ef3b54-tmp-dir\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.226022 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1758db3d-11c0-41e8-b35c-d9d296ef3b54-config\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.235866 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1758db3d-11c0-41e8-b35c-d9d296ef3b54-serving-cert\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.238768 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1758db3d-11c0-41e8-b35c-d9d296ef3b54-kube-api-access\") pod \"kube-apiserver-operator-575994946d-r7pzb\" (UID: \"1758db3d-11c0-41e8-b35c-d9d296ef3b54\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.324796 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.324961 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.824942333 +0000 UTC m=+107.839004792 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.325008 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2888bde6-bdde-4277-b478-2557c52cd1e2-metrics-tls\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.325045 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.325074 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2888bde6-bdde-4277-b478-2557c52cd1e2-bound-sa-token\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.325155 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2888bde6-bdde-4277-b478-2557c52cd1e2-trusted-ca\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.325175 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-wphzq\" (UniqueName: \"kubernetes.io/projected/2888bde6-bdde-4277-b478-2557c52cd1e2-kube-api-access-wphzq\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.325410 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.825376583 +0000 UTC m=+107.839439042 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.326777 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2888bde6-bdde-4277-b478-2557c52cd1e2-trusted-ca\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.331758 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2888bde6-bdde-4277-b478-2557c52cd1e2-metrics-tls\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.341993 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-wphzq\" (UniqueName: \"kubernetes.io/projected/2888bde6-bdde-4277-b478-2557c52cd1e2-kube-api-access-wphzq\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.354158 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2888bde6-bdde-4277-b478-2557c52cd1e2-bound-sa-token\") pod \"ingress-operator-6b9cb4dbcf-6p645\" (UID: \"2888bde6-bdde-4277-b478-2557c52cd1e2\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.399052 5119 generic.go:358] "Generic (PLEG): container finished" podID="4e4d65fd-a484-4711-a91f-cd04e6dfa00a" containerID="59b9b18c6306f9775dad8524b2919a9074ee9d436eefa67ceb30fe7540c25308" exitCode=0 Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.401510 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.410024 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" event={"ID":"c423b388-bcb0-40a3-9889-9ec109779849","Type":"ContainerStarted","Data":"409791d43e65f31fa7dcb59f3081d2cc1e6c74386579a436373d8e90e1a8ac25"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.410089 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64d44f6ddf-fjtqb" event={"ID":"80f4b56b-65a5-40d2-9a12-0465c0ff492c","Type":"ContainerStarted","Data":"fa9b5d3b13f6a3281025fb98f5ca700ae60d08e365be015b4f62171c2709e7da"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.410114 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-799b87ffcd-bzgsm"] Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.410691 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.410810 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" podStartSLOduration=86.410789669 podStartE2EDuration="1m26.410789669s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:43.166464487 +0000 UTC m=+107.180526946" watchObservedRunningTime="2026-01-30 00:11:43.410789669 +0000 UTC m=+107.424852128" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.414758 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"serving-cert\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.415264 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.415271 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-dockercfg-2h6bs\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.415338 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.416682 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"config\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.428361 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.428562 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.928518496 +0000 UTC m=+107.942580955 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.428952 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.429355 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.929345996 +0000 UTC m=+107.943408455 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.474444 5119 patch_prober.go:28] interesting pod/route-controller-manager-776cdc94d6-8sggv container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.474867 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.493742 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.516758 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878"] Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.517702 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" podStartSLOduration=87.517692463 podStartE2EDuration="1m27.517692463s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:43.487200798 +0000 UTC m=+107.501263257" watchObservedRunningTime="2026-01-30 00:11:43.517692463 +0000 UTC m=+107.531754922" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.517733 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.518196 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-64d44f6ddf-fjtqb" podStartSLOduration=87.518191745 podStartE2EDuration="1m27.518191745s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:43.505258713 +0000 UTC m=+107.519321172" watchObservedRunningTime="2026-01-30 00:11:43.518191745 +0000 UTC m=+107.532254204" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.520944 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"dns-operator-dockercfg-wbbsn\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.521167 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"metrics-tls\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.521202 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.521383 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.532434 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.532556 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-464rn\" (UniqueName: \"kubernetes.io/projected/695c79c0-2a93-4c7c-8bf5-660b128f6581-kube-api-access-464rn\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.532583 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/695c79c0-2a93-4c7c-8bf5-660b128f6581-serving-cert\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.532726 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.032678533 +0000 UTC m=+108.046740992 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.533026 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/695c79c0-2a93-4c7c-8bf5-660b128f6581-config\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643238 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndm2n\" (UniqueName: \"kubernetes.io/projected/1375f37f-3c35-4d19-ade4-559ffe4d22aa-kube-api-access-ndm2n\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643342 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1375f37f-3c35-4d19-ade4-559ffe4d22aa-tmp-dir\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643368 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-464rn\" (UniqueName: \"kubernetes.io/projected/695c79c0-2a93-4c7c-8bf5-660b128f6581-kube-api-access-464rn\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643418 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/695c79c0-2a93-4c7c-8bf5-660b128f6581-serving-cert\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643448 5119 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643486 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1375f37f-3c35-4d19-ade4-559ffe4d22aa-metrics-tls\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.643541 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/695c79c0-2a93-4c7c-8bf5-660b128f6581-config\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.644210 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/695c79c0-2a93-4c7c-8bf5-660b128f6581-config\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.645117 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.14509783 +0000 UTC m=+108.159160289 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.652731 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/695c79c0-2a93-4c7c-8bf5-660b128f6581-serving-cert\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.664482 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-464rn\" (UniqueName: \"kubernetes.io/projected/695c79c0-2a93-4c7c-8bf5-660b128f6581-kube-api-access-464rn\") pod \"kube-storage-version-migrator-operator-565b79b866-jx5df\" (UID: \"695c79c0-2a93-4c7c-8bf5-660b128f6581\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: W0130 00:11:43.698476 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2888bde6_bdde_4277_b478_2557c52cd1e2.slice/crio-29ea6c10224b3075fc9c591a9b36185aca488a174e00900c2ff83ee6be3a655d WatchSource:0}: Error finding container 29ea6c10224b3075fc9c591a9b36185aca488a174e00900c2ff83ee6be3a655d: Status 404 returned error can't find the container with id 29ea6c10224b3075fc9c591a9b36185aca488a174e00900c2ff83ee6be3a655d Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.716702 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.718816 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-scheduler-operator-serving-cert\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.719446 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-dockercfg-2wbn2\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.721798 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-config\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.721883 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.727225 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.728864 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42"] Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.744475 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.744627 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1375f37f-3c35-4d19-ade4-559ffe4d22aa-metrics-tls\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.744707 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-ndm2n\" (UniqueName: \"kubernetes.io/projected/1375f37f-3c35-4d19-ade4-559ffe4d22aa-kube-api-access-ndm2n\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.744740 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.244722519 +0000 UTC m=+108.258784978 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.744819 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1375f37f-3c35-4d19-ade4-559ffe4d22aa-tmp-dir\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.745202 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1375f37f-3c35-4d19-ade4-559ffe4d22aa-tmp-dir\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.750551 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1375f37f-3c35-4d19-ade4-559ffe4d22aa-metrics-tls\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.760893 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndm2n\" (UniqueName: \"kubernetes.io/projected/1375f37f-3c35-4d19-ade4-559ffe4d22aa-kube-api-access-ndm2n\") pod \"dns-operator-799b87ffcd-bzgsm\" (UID: \"1375f37f-3c35-4d19-ade4-559ffe4d22aa\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.832807 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.845972 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-config\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.846830 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.846957 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-kube-api-access\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.847074 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-tmp\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.847227 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.847940 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.347923703 +0000 UTC m=+108.361986162 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: W0130 00:11:43.905680 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod695c79c0_2a93_4c7c_8bf5_660b128f6581.slice/crio-3ffb719099607e14df9daedae7ea348760be7f2157474e428b1d58e735c3905f WatchSource:0}: Error finding container 3ffb719099607e14df9daedae7ea348760be7f2157474e428b1d58e735c3905f: Status 404 returned error can't find the container with id 3ffb719099607e14df9daedae7ea348760be7f2157474e428b1d58e735c3905f Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950403 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950458 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" event={"ID":"8168819b-6cca-4680-a37d-ade6172d7778","Type":"ContainerStarted","Data":"499a856683a97ce279a85b1f1235dd9123c3cce8869f3cf44dc5df6029257e48"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950495 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-tvgs8" event={"ID":"31630771-ce1a-4418-89ce-c58d6bf3c61f","Type":"ContainerStarted","Data":"a54c15a37dfe3ba62cfd506d684379fd8c837b8f83f7249906f7a4cd352af63e"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950507 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-tvgs8" event={"ID":"31630771-ce1a-4418-89ce-c58d6bf3c61f","Type":"ContainerStarted","Data":"21984f541a7c8b0c4c6a3160111a903810522908fb79b27cfc59d522f3a61a97"} Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950536 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-w67fs"] Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.950554 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.450527044 +0000 UTC m=+108.464589503 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950652 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950766 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-tmp\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950870 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950922 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-config\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.950985 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.951022 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-kube-api-access\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.951582 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-tmp\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: E0130 00:11:43.951822 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.451809504 +0000 UTC m=+108.465871963 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.953053 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-config\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.958211 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-dockercfg-tnfx9\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.958279 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-serving-cert\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.958214 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.958647 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-config\"" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.959709 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.964544 5119 patch_prober.go:28] interesting pod/controller-manager-65b6cccf98-6x6hj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.964604 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" podUID="8168819b-6cca-4680-a37d-ade6172d7778" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5119]: I0130 00:11:43.978027 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d4dbd80-a4d6-46fb-a310-97df8cf65bc9-kube-api-access\") pod \"openshift-kube-scheduler-operator-54f497555d-2c878\" (UID: \"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.028331 5119 kubelet.go:2658] 
"SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.028630 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.028489 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.028731 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" podStartSLOduration=88.028723436 podStartE2EDuration="1m28.028723436s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:43.982950334 +0000 UTC m=+107.997012793" watchObservedRunningTime="2026-01-30 00:11:44.028723436 +0000 UTC m=+108.042785895" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.028645 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-68cf44c8b8-tcmzw"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.031955 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-router-certs\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.032382 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"openshift-service-ca.crt\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.032574 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-error\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.032776 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"kube-root-ca.crt\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.032933 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"audit\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.033103 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"oauth-openshift-dockercfg-d2bf2\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.033267 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-service-ca\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.033514 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.033664 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-login\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.033690 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.033884 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-session\"" Jan 30 00:11:44 crc 
kubenswrapper[5119]: I0130 00:11:44.034036 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-serving-cert\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.035146 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.038100 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.047841 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.048701 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.052365 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.052659 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efe05509-fd88-45b1-8393-fad7b7758f9b-serving-cert\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.052748 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe05509-fd88-45b1-8393-fad7b7758f9b-kube-api-access\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.052780 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efe05509-fd88-45b1-8393-fad7b7758f9b-config\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.052929 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/efe05509-fd88-45b1-8393-fad7b7758f9b-tmp-dir\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.053770 5119 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.553751649 +0000 UTC m=+108.567814108 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.138699 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.138752 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.152772 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29495520-tvgs8" podStartSLOduration=89.152753142 podStartE2EDuration="1m29.152753142s" podCreationTimestamp="2026-01-30 00:10:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.081602289 +0000 UTC m=+108.095664748" watchObservedRunningTime="2026-01-30 00:11:44.152753142 +0000 UTC m=+108.166815601" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155595 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-router-certs\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155630 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155655 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/efe05509-fd88-45b1-8393-fad7b7758f9b-tmp-dir\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155675 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-session\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " 
pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155696 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efe05509-fd88-45b1-8393-fad7b7758f9b-serving-cert\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155713 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-audit-policies\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155782 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efe05509-fd88-45b1-8393-fad7b7758f9b-config\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155812 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.155848 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156099 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156142 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e32c931-da87-4115-8257-185ed217e76a-audit-dir\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156169 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dxxx\" (UniqueName: \"kubernetes.io/projected/6e32c931-da87-4115-8257-185ed217e76a-kube-api-access-8dxxx\") pod 
\"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156192 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156224 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe05509-fd88-45b1-8393-fad7b7758f9b-kube-api-access\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156291 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156310 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-service-ca\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156338 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-login\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156356 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-error\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156383 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.156728 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: 
\"kubernetes.io/empty-dir/efe05509-fd88-45b1-8393-fad7b7758f9b-tmp-dir\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.156969 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.656956454 +0000 UTC m=+108.671018913 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.158032 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efe05509-fd88-45b1-8393-fad7b7758f9b-config\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.161534 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efe05509-fd88-45b1-8393-fad7b7758f9b-serving-cert\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.172720 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe05509-fd88-45b1-8393-fad7b7758f9b-kube-api-access\") pod \"kube-controller-manager-operator-69d5f845f8-xcr42\" (UID: \"efe05509-fd88-45b1-8393-fad7b7758f9b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.216175 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" event={"ID":"682184a8-29d6-4081-99ac-9d5989e169ab","Type":"ContainerStarted","Data":"cf951f88577db5222dd55f6a5b88ccb8b271528c17e801fe57a3389f62615042"} Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.216217 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" event={"ID":"4e4d65fd-a484-4711-a91f-cd04e6dfa00a","Type":"ContainerStarted","Data":"5fc2b337164dbd41b5f91a9931772cbb0c860d3fae57ef71077ccf4fa3748426"} Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.216229 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" event={"ID":"0c6013b9-649a-4f77-a54b-c272bbbdf392","Type":"ContainerStarted","Data":"cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253"} Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.216240 5119 kubelet.go:2569] "SyncLoop 
(PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" event={"ID":"f8304fa9-6220-4bf9-a154-177628944fc1","Type":"ContainerStarted","Data":"14a6018aca423eac1c27c23f9f0b95f486d8a25e88951f032246a1c32e74dd9a"} Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.216254 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29495520-tvgs8"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.216269 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.217779 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.220941 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-metrics-certs-default\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.222105 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"kube-root-ca.crt\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.222201 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-certs-default\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.222204 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-stats-default\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.222584 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-dockercfg-kw8fx\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.222768 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"service-ca-bundle\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.222923 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"openshift-service-ca.crt\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.260592 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.260808 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.760783263 +0000 UTC m=+108.774845722 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.260895 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.260932 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.260953 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.260997 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4n5j\" (UniqueName: \"kubernetes.io/projected/b7507dbb-ef53-4022-a311-17ba6d8b37a8-kube-api-access-x4n5j\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.261075 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e32c931-da87-4115-8257-185ed217e76a-audit-dir\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.261149 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e32c931-da87-4115-8257-185ed217e76a-audit-dir\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264504 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264777 
5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264830 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8dxxx\" (UniqueName: \"kubernetes.io/projected/6e32c931-da87-4115-8257-185ed217e76a-kube-api-access-8dxxx\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264882 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264936 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7507dbb-ef53-4022-a311-17ba6d8b37a8-service-ca-bundle\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264961 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-stats-auth\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.264994 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.265017 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-service-ca\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.265047 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-login\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.265068 5119 reconciler_common.go:224] "operationExecutor.MountVolume 
started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-error\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.265099 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.265982 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-service-ca\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.266167 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.266242 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.766230184 +0000 UTC m=+108.780292643 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.267736 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-login\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.268573 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.265122 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-metrics-certs\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.268711 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-router-certs\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.268738 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.268768 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-session\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.268793 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-audit-policies\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.268861 
5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-default-certificate\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.270063 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-error\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.270733 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-audit-policies\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.273956 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-session\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.274420 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.275675 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.280731 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-router-certs\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.282575 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dxxx\" (UniqueName: \"kubernetes.io/projected/6e32c931-da87-4115-8257-185ed217e76a-kube-api-access-8dxxx\") pod \"oauth-openshift-66458b6674-w67fs\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") " pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.369671 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume 
\"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370197 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-default-certificate\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370261 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-x4n5j\" (UniqueName: \"kubernetes.io/projected/b7507dbb-ef53-4022-a311-17ba6d8b37a8-kube-api-access-x4n5j\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370287 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98n5f\" (UniqueName: \"kubernetes.io/projected/1856af5f-e69c-4379-a007-f30a582f28d1-kube-api-access-98n5f\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370334 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1856af5f-e69c-4379-a007-f30a582f28d1-profile-collector-cert\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370461 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1856af5f-e69c-4379-a007-f30a582f28d1-tmpfs\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370522 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7507dbb-ef53-4022-a311-17ba6d8b37a8-service-ca-bundle\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370549 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-stats-auth\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370598 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-metrics-certs\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: 
\"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.370662 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1856af5f-e69c-4379-a007-f30a582f28d1-srv-cert\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.370777 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.870759231 +0000 UTC m=+108.884821690 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.373165 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7507dbb-ef53-4022-a311-17ba6d8b37a8-service-ca-bundle\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.379681 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-default-certificate\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.380256 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-stats-auth\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.381101 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" event={"ID":"8168819b-6cca-4680-a37d-ade6172d7778","Type":"ContainerStarted","Data":"ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505"} Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.381146 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf"] Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.381346 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.381499 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.383097 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7507dbb-ef53-4022-a311-17ba6d8b37a8-metrics-certs\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.383669 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"catalog-operator-serving-cert\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.383939 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-tls\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.384112 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serviceaccount-dockercfg-4gqzj\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.385881 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-dockercfg-gnx66\"" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.392600 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.397210 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4n5j\" (UniqueName: \"kubernetes.io/projected/b7507dbb-ef53-4022-a311-17ba6d8b37a8-kube-api-access-x4n5j\") pod \"router-default-68cf44c8b8-tcmzw\" (UID: \"b7507dbb-ef53-4022-a311-17ba6d8b37a8\") " pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.401007 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.419183 5119 generic.go:358] "Generic (PLEG): container finished" podID="c423b388-bcb0-40a3-9889-9ec109779849" containerID="3f0b6f28e0d6d7e31f56c250815fc5b38578601a891f69a0c45dbfe85386f9e4" exitCode=0 Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.472573 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6f3ae473-7d01-46db-ad58-f27062e82346-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-75ffdb6fcd-tnqmt\" (UID: \"6f3ae473-7d01-46db-ad58-f27062e82346\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.472620 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1856af5f-e69c-4379-a007-f30a582f28d1-srv-cert\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.472888 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-98n5f\" (UniqueName: \"kubernetes.io/projected/1856af5f-e69c-4379-a007-f30a582f28d1-kube-api-access-98n5f\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.473015 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s844m\" (UniqueName: \"kubernetes.io/projected/6f3ae473-7d01-46db-ad58-f27062e82346-kube-api-access-s844m\") pod \"control-plane-machine-set-operator-75ffdb6fcd-tnqmt\" (UID: \"6f3ae473-7d01-46db-ad58-f27062e82346\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.473058 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1856af5f-e69c-4379-a007-f30a582f28d1-profile-collector-cert\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.473108 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.473162 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1856af5f-e69c-4379-a007-f30a582f28d1-tmpfs\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.475092 5119 
operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1856af5f-e69c-4379-a007-f30a582f28d1-tmpfs\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.475482 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.975463762 +0000 UTC m=+108.989526221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.479027 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1856af5f-e69c-4379-a007-f30a582f28d1-profile-collector-cert\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.480885 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1856af5f-e69c-4379-a007-f30a582f28d1-srv-cert\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.494322 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-98n5f\" (UniqueName: \"kubernetes.io/projected/1856af5f-e69c-4379-a007-f30a582f28d1-kube-api-access-98n5f\") pod \"catalog-operator-75ff9f647d-c9qgc\" (UID: \"1856af5f-e69c-4379-a007-f30a582f28d1\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.529797 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:44 crc kubenswrapper[5119]: W0130 00:11:44.549736 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7507dbb_ef53_4022_a311_17ba6d8b37a8.slice/crio-470b2591c72dca4ee53d67beee652582db5d81ba40a81328985d4ea0b3394931 WatchSource:0}: Error finding container 470b2591c72dca4ee53d67beee652582db5d81ba40a81328985d4ea0b3394931: Status 404 returned error can't find the container with id 470b2591c72dca4ee53d67beee652582db5d81ba40a81328985d4ea0b3394931 Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.573797 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.573985 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.073954083 +0000 UTC m=+109.088016542 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.574096 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.574233 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6f3ae473-7d01-46db-ad58-f27062e82346-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-75ffdb6fcd-tnqmt\" (UID: \"6f3ae473-7d01-46db-ad58-f27062e82346\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.574434 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-s844m\" (UniqueName: \"kubernetes.io/projected/6f3ae473-7d01-46db-ad58-f27062e82346-kube-api-access-s844m\") pod \"control-plane-machine-set-operator-75ffdb6fcd-tnqmt\" (UID: \"6f3ae473-7d01-46db-ad58-f27062e82346\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.574508 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:45.074492606 +0000 UTC m=+109.088555065 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: W0130 00:11:44.576438 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e32c931_da87_4115_8257_185ed217e76a.slice/crio-fe0907d28bfe4b6044528c5dac77d6d76289eec13cc0aad81e416d0aae0b7485 WatchSource:0}: Error finding container fe0907d28bfe4b6044528c5dac77d6d76289eec13cc0aad81e416d0aae0b7485: Status 404 returned error can't find the container with id fe0907d28bfe4b6044528c5dac77d6d76289eec13cc0aad81e416d0aae0b7485 Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.578860 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6f3ae473-7d01-46db-ad58-f27062e82346-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-75ffdb6fcd-tnqmt\" (UID: \"6f3ae473-7d01-46db-ad58-f27062e82346\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.590770 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-s844m\" (UniqueName: \"kubernetes.io/projected/6f3ae473-7d01-46db-ad58-f27062e82346-kube-api-access-s844m\") pod \"control-plane-machine-set-operator-75ffdb6fcd-tnqmt\" (UID: \"6f3ae473-7d01-46db-ad58-f27062e82346\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: W0130 00:11:44.593508 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefe05509_fd88_45b1_8393_fad7b7758f9b.slice/crio-5bc31df1e4e1970c91a9e328fd31f38be850fb0e78db070fff15248c0b0c5dc0 WatchSource:0}: Error finding container 5bc31df1e4e1970c91a9e328fd31f38be850fb0e78db070fff15248c0b0c5dc0: Status 404 returned error can't find the container with id 5bc31df1e4e1970c91a9e328fd31f38be850fb0e78db070fff15248c0b0c5dc0 Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.676011 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.676472 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.176452931 +0000 UTC m=+109.190515390 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.714077 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.718021 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.781247 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.781577 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.281566212 +0000 UTC m=+109.295628661 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.883175 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.883386 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.383363273 +0000 UTC m=+109.397425732 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.883469 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.883852 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.383836984 +0000 UTC m=+109.397899443 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: W0130 00:11:44.890898 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f3ae473_7d01_46db_ad58_f27062e82346.slice/crio-5e27972c8503481ac1507175005deeb0a9d57d47dfc4537ae4be1e162e79efad WatchSource:0}: Error finding container 5e27972c8503481ac1507175005deeb0a9d57d47dfc4537ae4be1e162e79efad: Status 404 returned error can't find the container with id 5e27972c8503481ac1507175005deeb0a9d57d47dfc4537ae4be1e162e79efad Jan 30 00:11:44 crc kubenswrapper[5119]: W0130 00:11:44.910850 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1856af5f_e69c_4379_a007_f30a582f28d1.slice/crio-249a41813581c1777f8ce340abc76fb3ea0f24c52c207c9cc8f0c51c57341a66 WatchSource:0}: Error finding container 249a41813581c1777f8ce340abc76fb3ea0f24c52c207c9cc8f0c51c57341a66: Status 404 returned error can't find the container with id 249a41813581c1777f8ce340abc76fb3ea0f24c52c207c9cc8f0c51c57341a66 Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.984624 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.484604 +0000 UTC m=+109.498666459 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.984622 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5119]: I0130 00:11:44.984994 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:44 crc kubenswrapper[5119]: E0130 00:11:44.985362 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.485348768 +0000 UTC m=+109.499411227 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.085614 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.086019 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.586004351 +0000 UTC m=+109.600066810 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.187752 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.188240 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.688226252 +0000 UTC m=+109.702288711 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.193127 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" event={"ID":"4e4d65fd-a484-4711-a91f-cd04e6dfa00a","Type":"ContainerStarted","Data":"59b9b18c6306f9775dad8524b2919a9074ee9d436eefa67ceb30fe7540c25308"} Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.193202 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-xmw98"] Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.193555 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.200002 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-dockercfg-sw6nc\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.200209 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-images\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.200424 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mco-proxy-tls\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.289087 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.289362 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.789320486 +0000 UTC m=+109.803382945 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.289459 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/790922bf-8215-4d59-a46e-9035f3be8e32-proxy-tls\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.289518 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/790922bf-8215-4d59-a46e-9035f3be8e32-images\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.289627 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.289662 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktf5x\" (UniqueName: 
\"kubernetes.io/projected/790922bf-8215-4d59-a46e-9035f3be8e32-kube-api-access-ktf5x\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.289727 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/790922bf-8215-4d59-a46e-9035f3be8e32-auth-proxy-config\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.290026 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.790018813 +0000 UTC m=+109.804081272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.391015 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.391140 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/790922bf-8215-4d59-a46e-9035f3be8e32-auth-proxy-config\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.391173 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/790922bf-8215-4d59-a46e-9035f3be8e32-proxy-tls\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.391200 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/790922bf-8215-4d59-a46e-9035f3be8e32-images\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.391254 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-ktf5x\" (UniqueName: \"kubernetes.io/projected/790922bf-8215-4d59-a46e-9035f3be8e32-kube-api-access-ktf5x\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" 
(UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.391598 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.891582528 +0000 UTC m=+109.905644987 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.392250 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/790922bf-8215-4d59-a46e-9035f3be8e32-auth-proxy-config\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.392835 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/790922bf-8215-4d59-a46e-9035f3be8e32-images\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.396814 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/790922bf-8215-4d59-a46e-9035f3be8e32-proxy-tls\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.479167 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktf5x\" (UniqueName: \"kubernetes.io/projected/790922bf-8215-4d59-a46e-9035f3be8e32-kube-api-access-ktf5x\") pod \"machine-config-operator-67c9d58cbb-2jnpf\" (UID: \"790922bf-8215-4d59-a46e-9035f3be8e32\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.492603 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.492887 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.992875167 +0000 UTC m=+110.006937626 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.516217 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.593599 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.593793 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.093764536 +0000 UTC m=+110.107826995 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.594169 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.594602 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.094591306 +0000 UTC m=+110.108653835 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.694927 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.695095 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.195063425 +0000 UTC m=+110.209125904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.695299 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.695655 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.195640919 +0000 UTC m=+110.209703378 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.783555 5119 patch_prober.go:28] interesting pod/console-operator-67c89758df-l9h6v container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.783629 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" podUID="4b0f3995-a41a-43fe-9811-2a31b2e34c81" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.797370 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.797540 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.297509372 +0000 UTC m=+110.311571831 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.797744 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.798052 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.298039684 +0000 UTC m=+110.312102143 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.816347 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.819992 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"openshift-service-ca.crt\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.820477 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-metrics\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.820930 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"kube-root-ca.crt\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.821072 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-dockercfg-2cfkp\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.823297 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"] Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.824831 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" podStartSLOduration=90.824812599 podStartE2EDuration="1m30.824812599s" podCreationTimestamp="2026-01-30 00:10:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.805800751 +0000 UTC m=+109.819863220" watchObservedRunningTime="2026-01-30 00:11:45.824812599 +0000 UTC m=+109.838875068" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.825621 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" podStartSLOduration=89.825612948 podStartE2EDuration="1m29.825612948s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.787539542 +0000 UTC m=+109.801602031" watchObservedRunningTime="2026-01-30 00:11:45.825612948 +0000 UTC m=+109.839675417" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.826690 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"marketplace-trusted-ca\"" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.861403 5119 patch_prober.go:28] interesting pod/controller-manager-65b6cccf98-6x6hj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.861445 5119 prober.go:120] "Probe failed" probeType="Readiness" 
pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" podUID="8168819b-6cca-4680-a37d-ade6172d7778" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Jan 30 00:11:45 crc kubenswrapper[5119]: I0130 00:11:45.898983 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5119]: E0130 00:11:45.899665 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.3996363 +0000 UTC m=+110.413698749 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.000675 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e627abc4-228d-4133-8f48-393e979d9826-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.000788 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e627abc4-228d-4133-8f48-393e979d9826-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.001005 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlbrs\" (UniqueName: \"kubernetes.io/projected/e627abc4-228d-4133-8f48-393e979d9826-kube-api-access-xlbrs\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.001042 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e627abc4-228d-4133-8f48-393e979d9826-tmp\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.001084 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.002011 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"] Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.004408 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.004779 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" podStartSLOduration=91.004766002 podStartE2EDuration="1m31.004766002s" podCreationTimestamp="2026-01-30 00:10:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.873595474 +0000 UTC m=+109.887657933" watchObservedRunningTime="2026-01-30 00:11:46.004766002 +0000 UTC m=+110.018828461" Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.005214 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.505199702 +0000 UTC m=+110.519262271 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.006842 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"package-server-manager-serving-cert\"" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.082775 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-755bb95488-sxw6b"] Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.082822 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.082961 5119 util.go:30] "No sandbox for pod can be found. 
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.082963 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.083096 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.088498 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-root-ca.crt\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.088592 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-storage-version-migrator-sa-dockercfg-kknhg\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.088894 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"openshift-service-ca.crt\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.102652 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.102816 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e627abc4-228d-4133-8f48-393e979d9826-tmp\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.103090 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/04691874-6d27-40cd-a0c5-b5b3075a3327-package-server-manager-serving-cert\") pod \"package-server-manager-77f986bd66-wmt9q\" (UID: \"04691874-6d27-40cd-a0c5-b5b3075a3327\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.103129 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwrzd\" (UniqueName: \"kubernetes.io/projected/04691874-6d27-40cd-a0c5-b5b3075a3327-kube-api-access-nwrzd\") pod \"package-server-manager-77f986bd66-wmt9q\" (UID: \"04691874-6d27-40cd-a0c5-b5b3075a3327\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.103178 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e627abc4-228d-4133-8f48-393e979d9826-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.103377 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e627abc4-228d-4133-8f48-393e979d9826-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.103613 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xlbrs\" (UniqueName: \"kubernetes.io/projected/e627abc4-228d-4133-8f48-393e979d9826-kube-api-access-xlbrs\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.104036 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.604016881 +0000 UTC m=+110.618079420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.104543 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e627abc4-228d-4133-8f48-393e979d9826-tmp\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.106354 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e627abc4-228d-4133-8f48-393e979d9826-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.110286 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e627abc4-228d-4133-8f48-393e979d9826-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.126637 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlbrs\" (UniqueName: \"kubernetes.io/projected/e627abc4-228d-4133-8f48-393e979d9826-kube-api-access-xlbrs\") pod \"marketplace-operator-547dbd544d-xmw98\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.136125 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.205312 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.205839 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.705825562 +0000 UTC m=+110.719888021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.206307 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8bc2\" (UniqueName: \"kubernetes.io/projected/f6110d7d-2373-491a-89f6-6e2068b759fe-kube-api-access-d8bc2\") pod \"migrator-866fcbc849-4trwn\" (UID: \"f6110d7d-2373-491a-89f6-6e2068b759fe\") " pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.206341 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/04691874-6d27-40cd-a0c5-b5b3075a3327-package-server-manager-serving-cert\") pod \"package-server-manager-77f986bd66-wmt9q\" (UID: \"04691874-6d27-40cd-a0c5-b5b3075a3327\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.207035 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nwrzd\" (UniqueName: \"kubernetes.io/projected/04691874-6d27-40cd-a0c5-b5b3075a3327-kube-api-access-nwrzd\") pod \"package-server-manager-77f986bd66-wmt9q\" (UID: \"04691874-6d27-40cd-a0c5-b5b3075a3327\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.212788 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/04691874-6d27-40cd-a0c5-b5b3075a3327-package-server-manager-serving-cert\") pod \"package-server-manager-77f986bd66-wmt9q\" (UID: \"04691874-6d27-40cd-a0c5-b5b3075a3327\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.227265 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwrzd\" (UniqueName: \"kubernetes.io/projected/04691874-6d27-40cd-a0c5-b5b3075a3327-kube-api-access-nwrzd\") pod \"package-server-manager-77f986bd66-wmt9q\" (UID: \"04691874-6d27-40cd-a0c5-b5b3075a3327\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.288717 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-67c89758df-l9h6v"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.288805 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.288819 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.289008 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.301733 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-dockercfg-bjqfd\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.301972 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-config\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.302103 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"serving-cert\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.302220 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.302406 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"kube-root-ca.crt\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.307957 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.308117 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.808093555 +0000 UTC m=+110.822156014 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.308253 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.308589 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-d8bc2\" (UniqueName: \"kubernetes.io/projected/f6110d7d-2373-491a-89f6-6e2068b759fe-kube-api-access-d8bc2\") pod \"migrator-866fcbc849-4trwn\" (UID: \"f6110d7d-2373-491a-89f6-6e2068b759fe\") " pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.308758 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.80874824 +0000 UTC m=+110.822810709 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.327607 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.341900 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8bc2\" (UniqueName: \"kubernetes.io/projected/f6110d7d-2373-491a-89f6-6e2068b759fe-kube-api-access-d8bc2\") pod \"migrator-866fcbc849-4trwn\" (UID: \"f6110d7d-2373-491a-89f6-6e2068b759fe\") " pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.359185 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.359569 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
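The nestedpendingoperations.go:348 entries show the volume manager's retry bookkeeping: each failed operation records a deadline ("No retries permitted until ...") and attempts arriving before it are rejected without running; in these lines the delay stays at 500ms. A toy version of that gate under those assumptions (retryGate and try are illustrative names, not kubelet's types):

```go
// Sketch only: a per-volume "no retries permitted until" gate like the one
// nestedpendingoperations.go reports. A failed operation records a
// retry-not-before deadline; earlier attempts are refused outright.
package main

import (
	"fmt"
	"time"
)

type retryGate struct {
	notBefore map[string]time.Time
	backoff   time.Duration
}

func (g *retryGate) try(volume string, op func() error) error {
	if t, ok := g.notBefore[volume]; ok && time.Now().Before(t) {
		return fmt.Errorf("operation for %q failed. No retries permitted until %s (durationBeforeRetry %s)",
			volume, t.Format(time.RFC3339Nano), g.backoff)
	}
	if err := op(); err != nil {
		g.notBefore[volume] = time.Now().Add(g.backoff) // schedule the next attempt
		return err
	}
	delete(g.notBefore, volume) // success clears the gate
	return nil
}

func main() {
	g := &retryGate{notBefore: map[string]time.Time{}, backoff: 500 * time.Millisecond}
	vol := "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2"
	_ = g.try(vol, func() error { return fmt.Errorf("driver not registered") })
	// An immediate retry is refused, matching the 500ms cadence in the log.
	fmt.Println(g.try(vol, func() error { return nil }))
}
```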
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.370310 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"packageserver-service-cert\""
Jan 30 00:11:46 crc kubenswrapper[5119]: W0130 00:11:46.384820 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode627abc4_228d_4133_8f48_393e979d9826.slice/crio-81fd8f10a2d32aa052b0316b43938ead36bbe778dae37e2c029d9767023f02d9 WatchSource:0}: Error finding container 81fd8f10a2d32aa052b0316b43938ead36bbe778dae37e2c029d9767023f02d9: Status 404 returned error can't find the container with id 81fd8f10a2d32aa052b0316b43938ead36bbe778dae37e2c029d9767023f02d9
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.409620 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.409742 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jqks\" (UniqueName: \"kubernetes.io/projected/9ed3b66f-12b9-4481-b6be-2d259c030348-kube-api-access-9jqks\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.409829 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.909797323 +0000 UTC m=+110.923859782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.409949 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed3b66f-12b9-4481-b6be-2d259c030348-serving-cert\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.410072 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed3b66f-12b9-4481-b6be-2d259c030348-config\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.424694 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.454880 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" event={"ID":"4363b09f-d35c-47ec-b96d-c9437ccf2206","Type":"ContainerDied","Data":"a426c347164125304d2bbf0fc4c2cdd8f387f9f6a1e4f4b07a89acbb2dd08d7d"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.454928 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-74545575db-twjcj"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.455979 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.459335 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serving-cert\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.510789 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed3b66f-12b9-4481-b6be-2d259c030348-config\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.510869 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-apiservice-cert\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.510896 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9jqks\" (UniqueName: \"kubernetes.io/projected/9ed3b66f-12b9-4481-b6be-2d259c030348-kube-api-access-9jqks\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.510920 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.510958 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkld5\" (UniqueName: \"kubernetes.io/projected/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-kube-api-access-lkld5\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.510984 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed3b66f-12b9-4481-b6be-2d259c030348-serving-cert\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.511006 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-tmpfs\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.511030 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-webhook-cert\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.511321 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.011310907 +0000 UTC m=+111.025373366 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.516118 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed3b66f-12b9-4481-b6be-2d259c030348-config\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.522276 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed3b66f-12b9-4481-b6be-2d259c030348-serving-cert\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.531229 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jqks\" (UniqueName: \"kubernetes.io/projected/9ed3b66f-12b9-4481-b6be-2d259c030348-kube-api-access-9jqks\") pod \"service-ca-operator-5b9c976747-rlmp7\" (UID: \"9ed3b66f-12b9-4481-b6be-2d259c030348\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.589933 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-69db94689b-xxmhj"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.590353 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-74545575db-twjcj"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.593731 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"openshift-service-ca.crt\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.594142 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"kube-root-ca.crt\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.594290 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"signing-cabundle\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.594691 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"signing-key\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.594717 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"service-ca-dockercfg-bgxvm\""
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.603562 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.607508 5119 patch_prober.go:28] interesting pod/console-operator-67c89758df-l9h6v container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.607548 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" podUID="4b0f3995-a41a-43fe-9811-2a31b2e34c81" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.613984 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614089 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-apiservice-cert\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614115 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-tmpfs\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614146 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-profile-collector-cert\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614184 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfs44\" (UniqueName: \"kubernetes.io/projected/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-kube-api-access-lfs44\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614201 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-lkld5\" (UniqueName: \"kubernetes.io/projected/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-kube-api-access-lkld5\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614235 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-tmpfs\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614259 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-webhook-cert\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.614277 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-srv-cert\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.614747 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.114723277 +0000 UTC m=+111.128785736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.615419 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-tmpfs\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.628932 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-webhook-cert\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.629136 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-apiservice-cert\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.636431 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkld5\" (UniqueName: \"kubernetes.io/projected/c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb-kube-api-access-lkld5\") pod \"packageserver-7d4fc7d867-tgxk7\" (UID: \"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.674446 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
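The patch_prober/prober pair above shows what an HTTPS readiness probe reduces to: a GET against the pod IP where any dial error, such as the "connect: connection refused" from a container that is not listening yet, marks the pod not ready. A self-contained sketch (the URL, timeout, and skip-verify TLS setting are illustrative assumptions, not kubelet's exact configuration):

```go
// Sketch only: an HTTPS readiness check of the kind prober.go logs above --
// a GET against the pod IP that treats any dial error or non-2xx/3xx status
// as "not ready".
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func probeReady(url string) error {
	client := &http.Client{
		Timeout: 1 * time.Second,
		// Probes of self-signed pod serving certs typically skip
		// verification; this mirrors that behavior for the sketch.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err) // e.g. "connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Against a pod that has not started listening yet, this returns the
	// "connection refused" error seen in the log.
	fmt.Println(probeReady("https://10.217.0.11:8443/readyz"))
}
```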
Jan 30 00:11:46 crc kubenswrapper[5119]: W0130 00:11:46.695560 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04691874_6d27_40cd_a0c5_b5b3075a3327.slice/crio-24bb7d17a4c76dbf399b7caff72d1b7a16af2bf4bb100747b11d73b138f9e0f6 WatchSource:0}: Error finding container 24bb7d17a4c76dbf399b7caff72d1b7a16af2bf4bb100747b11d73b138f9e0f6: Status 404 returned error can't find the container with id 24bb7d17a4c76dbf399b7caff72d1b7a16af2bf4bb100747b11d73b138f9e0f6
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.717649 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-srv-cert\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.717761 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/db680431-c513-449a-a2d0-2df450dbbb9c-signing-key\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.717835 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq9lc\" (UniqueName: \"kubernetes.io/projected/db680431-c513-449a-a2d0-2df450dbbb9c-kube-api-access-wq9lc\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.717890 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/db680431-c513-449a-a2d0-2df450dbbb9c-signing-cabundle\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.717925 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-tmpfs\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.717990 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.718017 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-profile-collector-cert\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.718141 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-lfs44\" (UniqueName: \"kubernetes.io/projected/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-kube-api-access-lfs44\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.719993 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.219976831 +0000 UTC m=+111.234039290 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735549 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-9ddfb9f55-9n8tq"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735587 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" event={"ID":"27037d8c-3db2-4e66-8680-7804a89fd519","Type":"ContainerStarted","Data":"b35526e5b92f62938023f337933ff46e4222f228282bef465dd08ec56a51da49"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735721 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj"
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735747 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-nts9m" event={"ID":"ff01653b-8f2a-47a1-ae0c-0ac878c25570","Type":"ContainerStarted","Data":"e23d8a3af5e038e0c13cd66aa8950397c08ee9eea75e43c1b5cb6816cc7e1798"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735810 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735830 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" event={"ID":"67a37ab3-c501-4e8d-96f2-907b849e3856","Type":"ContainerStarted","Data":"3e60a20fba13c41e2a8c3b956e874149880e05f2ceb3cf0df0dc661394f67cae"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735845 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735859 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" event={"ID":"d1469b0b-e1a0-4898-ba56-76ae9cce8867","Type":"ContainerStarted","Data":"cd0e59bddb4f52b997d81a6f7696e95075f8695934bb1a91466c6630f67409e9"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735873 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735885 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-69b85846b6-x62sl"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735900 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" event={"ID":"f65bd14d-60b5-4288-9fa3-6322881a4015","Type":"ContainerStarted","Data":"19f54ba873ad67ee99e9b0bda462aafd65511032b9aefb77a50c3ed6a07ec2f0"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.735911 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"]
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736065 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" event={"ID":"682184a8-29d6-4081-99ac-9d5989e169ab","Type":"ContainerStarted","Data":"ebcdb8d1ae09b5ad0a8f3f67c581aa1c6a6cbf8c77805f12fbacd9e1b7263138"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736081 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" event={"ID":"7be2f013-d656-48d9-b332-e66e20efa66f","Type":"ContainerStarted","Data":"b52eec21ee8ad5a6f17f3f58c4a9b79f1d460554ed97cbcab203abf492c126b2"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736095 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" event={"ID":"4e4d65fd-a484-4711-a91f-cd04e6dfa00a","Type":"ContainerDied","Data":"59b9b18c6306f9775dad8524b2919a9074ee9d436eefa67ceb30fe7540c25308"}
Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736112 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" event={"ID":"53c88ff0-d29e-4cb6-8904-e1e203848f51","Type":"ContainerStarted","Data":"4c228e9eb45e31c81df3beb9e907f3a4224b945a8d23dad15e83830b5b2b1af8"}
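The "SyncLoop (PLEG)" entries carry a small structured payload, event={"ID":...,"Type":...,"Data":...}, pairing a pod UID with a container lifecycle transition and a container ID. The payload is plain JSON, so a throwaway struct can decode one of the events above (the struct is illustrative, not kubelet's actual type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Field names follow the event={"ID":...,"Type":...,"Data":...} payloads in
// the SyncLoop (PLEG) lines above; the struct itself is just for this sketch.
type plegEvent struct {
	ID   string // pod UID
	Type string // e.g. ContainerStarted, ContainerDied
	Data string // container or sandbox ID
}

func main() {
	raw := `{"ID":"27037d8c-3db2-4e66-8680-7804a89fd519","Type":"ContainerStarted","Data":"b35526e5b92f62938023f337933ff46e4222f228282bef465dd08ec56a51da49"}`
	var ev plegEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s (%s...)\n", ev.ID, ev.Type, ev.Data[:12])
}
```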
pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2" event={"ID":"53c88ff0-d29e-4cb6-8904-e1e203848f51","Type":"ContainerStarted","Data":"4c228e9eb45e31c81df3beb9e907f3a4224b945a8d23dad15e83830b5b2b1af8"} Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736126 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64d44f6ddf-fjtqb" event={"ID":"80f4b56b-65a5-40d2-9a12-0465c0ff492c","Type":"ContainerStarted","Data":"7fa713944b143f55708da260bc0f0041147299a32f2c2de3ebe05d9b7de3ceb4"} Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736143 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-w8qxl"] Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.736700 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-tmpfs\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.737487 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-admission-controller-secret\"" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.740658 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-profile-collector-cert\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.741129 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ac-dockercfg-gj7jx\"" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.742993 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-srv-cert\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.752659 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfs44\" (UniqueName: \"kubernetes.io/projected/cb5e6bb2-b3b7-43b9-aae8-2385735815cd-kube-api-access-lfs44\") pod \"olm-operator-5cdf44d969-bt8gg\" (UID: \"cb5e6bb2-b3b7-43b9-aae8-2385735815cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.792954 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.819370 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.820224 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.320202754 +0000 UTC m=+111.334265203 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.820308 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.820619 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/db680431-c513-449a-a2d0-2df450dbbb9c-signing-key\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.820673 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-wq9lc\" (UniqueName: \"kubernetes.io/projected/db680431-c513-449a-a2d0-2df450dbbb9c-kube-api-access-wq9lc\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.820703 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/db680431-c513-449a-a2d0-2df450dbbb9c-signing-cabundle\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj" Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.823456 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.323444842 +0000 UTC m=+111.337507301 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.825370 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/db680431-c513-449a-a2d0-2df450dbbb9c-signing-cabundle\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.841101 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/db680431-c513-449a-a2d0-2df450dbbb9c-signing-key\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.846604 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq9lc\" (UniqueName: \"kubernetes.io/projected/db680431-c513-449a-a2d0-2df450dbbb9c-kube-api-access-wq9lc\") pod \"service-ca-74545575db-twjcj\" (UID: \"db680431-c513-449a-a2d0-2df450dbbb9c\") " pod="openshift-service-ca/service-ca-74545575db-twjcj" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.924706 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.924864 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.424836273 +0000 UTC m=+111.438898732 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.925178 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.925209 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5gp4\" (UniqueName: \"kubernetes.io/projected/4ffa4428-a504-40e8-9e04-5b8547bd6875-kube-api-access-t5gp4\") pod \"multus-admission-controller-69db94689b-xxmhj\" (UID: \"4ffa4428-a504-40e8-9e04-5b8547bd6875\") " pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj" Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.925256 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ffa4428-a504-40e8-9e04-5b8547bd6875-webhook-certs\") pod \"multus-admission-controller-69db94689b-xxmhj\" (UID: \"4ffa4428-a504-40e8-9e04-5b8547bd6875\") " pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj" Jan 30 00:11:46 crc kubenswrapper[5119]: E0130 00:11:46.925491 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.425483929 +0000 UTC m=+111.439546388 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5119]: I0130 00:11:46.939907 5119 util.go:30] "No sandbox for pod can be found. 
Jan 30 00:11:47 crc kubenswrapper[5119]: W0130 00:11:47.006559 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4ad95fd_bcbf_4e24_8df4_dae7f9b018fb.slice/crio-c838b600ec88ba4e3ac27a0bd752cf64a98f8f5c892b81b53de10a14eb052dc9 WatchSource:0}: Error finding container c838b600ec88ba4e3ac27a0bd752cf64a98f8f5c892b81b53de10a14eb052dc9: Status 404 returned error can't find the container with id c838b600ec88ba4e3ac27a0bd752cf64a98f8f5c892b81b53de10a14eb052dc9
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.026897 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.027293 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-t5gp4\" (UniqueName: \"kubernetes.io/projected/4ffa4428-a504-40e8-9e04-5b8547bd6875-kube-api-access-t5gp4\") pod \"multus-admission-controller-69db94689b-xxmhj\" (UID: \"4ffa4428-a504-40e8-9e04-5b8547bd6875\") " pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj"
Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.027360 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.52733466 +0000 UTC m=+111.541397119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.027477 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ffa4428-a504-40e8-9e04-5b8547bd6875-webhook-certs\") pod \"multus-admission-controller-69db94689b-xxmhj\" (UID: \"4ffa4428-a504-40e8-9e04-5b8547bd6875\") " pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj"
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.045309 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4ffa4428-a504-40e8-9e04-5b8547bd6875-webhook-certs\") pod \"multus-admission-controller-69db94689b-xxmhj\" (UID: \"4ffa4428-a504-40e8-9e04-5b8547bd6875\") " pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj"
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.050149 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5gp4\" (UniqueName: \"kubernetes.io/projected/4ffa4428-a504-40e8-9e04-5b8547bd6875-kube-api-access-t5gp4\") pod \"multus-admission-controller-69db94689b-xxmhj\" (UID: \"4ffa4428-a504-40e8-9e04-5b8547bd6875\") " pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj"
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.060539 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj"
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.131269 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.132040 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.632027521 +0000 UTC m=+111.646089980 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:47 crc kubenswrapper[5119]: W0130 00:11:47.178754 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb680431_c513_449a_a2d0_2df450dbbb9c.slice/crio-59d8d168bb907994ebc2dde0bec3caab309a81aace812e9b92c80bdc2d29bb68 WatchSource:0}: Error finding container 59d8d168bb907994ebc2dde0bec3caab309a81aace812e9b92c80bdc2d29bb68: Status 404 returned error can't find the container with id 59d8d168bb907994ebc2dde0bec3caab309a81aace812e9b92c80bdc2d29bb68
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.232873 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.232988 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.732969081 +0000 UTC m=+111.747031540 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.233525 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.233936 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.733919204 +0000 UTC m=+111.747981663 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.334550 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.334730 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.83470252 +0000 UTC m=+111.848764979 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.335107 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.335504 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.835487669 +0000 UTC m=+111.849550118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.436519 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.436772 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.936739867 +0000 UTC m=+111.950802326 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.437124 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.437889 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.937880914 +0000 UTC m=+111.951943373 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.539092 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.539284 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.039256835 +0000 UTC m=+112.053319294 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.539846 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.540102 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.040095515 +0000 UTC m=+112.054157974 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.621692 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.622101 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkvpm"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.623333 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.623564 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-747b44746d-nts9m" podStartSLOduration=91.623534704 podStartE2EDuration="1m31.623534704s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.287172026 +0000 UTC m=+111.301234485" watchObservedRunningTime="2026-01-30 00:11:47.623534704 +0000 UTC m=+111.637597163" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.625828 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-dockercfg-kpvmz\"" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.626263 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"dns-default\"" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.629634 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-default-metrics-tls\"" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.640339 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.640547 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.140517193 +0000 UTC m=+112.154579662 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.640659 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.640990 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.140979164 +0000 UTC m=+112.155041633 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.741515 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.741718 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.241690689 +0000 UTC m=+112.255753148 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.741835 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tw9b\" (UniqueName: \"kubernetes.io/projected/b82c2f8b-0956-4050-b980-2973d235692b-kube-api-access-4tw9b\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.741934 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b82c2f8b-0956-4050-b980-2973d235692b-metrics-tls\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.741971 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/b82c2f8b-0956-4050-b980-2973d235692b-tmp-dir\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.742160 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b82c2f8b-0956-4050-b980-2973d235692b-config-volume\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.742231 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod 
\"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.742604 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.242589261 +0000 UTC m=+112.256651730 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.844595 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.844732 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.344708549 +0000 UTC m=+112.358771008 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.844993 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-4tw9b\" (UniqueName: \"kubernetes.io/projected/b82c2f8b-0956-4050-b980-2973d235692b-kube-api-access-4tw9b\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.845842 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b82c2f8b-0956-4050-b980-2973d235692b-metrics-tls\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.845874 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/b82c2f8b-0956-4050-b980-2973d235692b-tmp-dir\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.847565 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/b82c2f8b-0956-4050-b980-2973d235692b-tmp-dir\") pod 
\"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.847775 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b82c2f8b-0956-4050-b980-2973d235692b-config-volume\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.848241 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.348222814 +0000 UTC m=+112.362285273 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.848777 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b82c2f8b-0956-4050-b980-2973d235692b-config-volume\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.848837 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.851926 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b82c2f8b-0956-4050-b980-2973d235692b-metrics-tls\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.878883 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tw9b\" (UniqueName: \"kubernetes.io/projected/b82c2f8b-0956-4050-b980-2973d235692b-kube-api-access-4tw9b\") pod \"dns-default-w8qxl\" (UID: \"b82c2f8b-0956-4050-b980-2973d235692b\") " pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.938411 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-w8qxl" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.950022 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.950146 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.450114767 +0000 UTC m=+112.464177226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.951413 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:47 crc kubenswrapper[5119]: E0130 00:11:47.951820 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.451786437 +0000 UTC m=+112.465848896 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.965350 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.968894 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"hostpath-provisioner\"/\"csi-hostpath-provisioner-sa-dockercfg-7dcws\"" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.969086 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"openshift-service-ca.crt\"" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.969213 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"kube-root-ca.crt\"" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979756 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" event={"ID":"16e266d0-7218-4013-a778-df876d9f8270","Type":"ContainerStarted","Data":"005dd0317646e4590aa4cec859cb6f27af9e6dbbe11d8c11256a89c731d90425"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979798 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" event={"ID":"67e5ee6c-a546-4a02-84ad-b736baa67181","Type":"ContainerStarted","Data":"38120b753fb018acb410ae31d045b837e33a4a9e196839e9aeb5a239f333bee5"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979819 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-xmw98"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979836 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979848 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" event={"ID":"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9","Type":"ContainerStarted","Data":"0a36c91ec80f7a9ad761ed260e5df1fbdf11c17f00e50ba116e7ec122d35a631"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979860 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" event={"ID":"f8304fa9-6220-4bf9-a154-177628944fc1","Type":"ContainerStarted","Data":"1554609dd9b6a26b95e7718227b82984ae4a6d98cdf3e79174eb0c6a62876e2e"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979873 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" event={"ID":"c423b388-bcb0-40a3-9889-9ec109779849","Type":"ContainerDied","Data":"3f0b6f28e0d6d7e31f56c250815fc5b38578601a891f69a0c45dbfe85386f9e4"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979891 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979904 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979915 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" 
event={"ID":"695c79c0-2a93-4c7c-8bf5-660b128f6581","Type":"ContainerStarted","Data":"3ffb719099607e14df9daedae7ea348760be7f2157474e428b1d58e735c3905f"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979927 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" event={"ID":"2888bde6-bdde-4277-b478-2557c52cd1e2","Type":"ContainerStarted","Data":"29ea6c10224b3075fc9c591a9b36185aca488a174e00900c2ff83ee6be3a655d"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979951 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" event={"ID":"4b0f3995-a41a-43fe-9811-2a31b2e34c81","Type":"ContainerStarted","Data":"93e8e7569aebd94d86d026b614538c9cdab41470c32cb985e234954cbf6db127"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979962 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-54c688565-wmrc9" event={"ID":"fa8ee3ba-16b1-45ac-84be-6da36536cc06","Type":"ContainerStarted","Data":"2563de869200aa84d2f2606aa26ebcdc772c384229937d7fd4e627faab1aba6d"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979977 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.979989 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980000 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980012 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-747b44746d-nts9m"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980023 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980034 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" event={"ID":"d1469b0b-e1a0-4898-ba56-76ae9cce8867","Type":"ContainerStarted","Data":"ad7b7113b8ef3870be6dc045c0fa95a4eb2cc1f904270e74cd36305f7dab3437"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980046 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-c5xkv"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980057 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980067 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980078 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" event={"ID":"f65bd14d-60b5-4288-9fa3-6322881a4015","Type":"ContainerStarted","Data":"22dfc212258ad74e78a34d790390b0b652f3d55d9f42572a019fc789d02c5b5a"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980091 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" 
pods=["openshift-dns-operator/dns-operator-799b87ffcd-bzgsm"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980104 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" event={"ID":"1375f37f-3c35-4d19-ade4-559ffe4d22aa","Type":"ContainerStarted","Data":"c9762347148f15991662dd61c36467308e641df1b1bcf468ed00869a0c6940f6"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980116 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980128 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" event={"ID":"1758db3d-11c0-41e8-b35c-d9d296ef3b54","Type":"ContainerStarted","Data":"00ced7f729e0cc84c6bd8475d31f1635e7bf90d6e714fe7a2bbe01a095f594c1"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980138 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64d44f6ddf-fjtqb"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980150 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980160 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980169 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-5777786469-7wrgd"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980180 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" event={"ID":"6f3ae473-7d01-46db-ad58-f27062e82346","Type":"ContainerStarted","Data":"5e27972c8503481ac1507175005deeb0a9d57d47dfc4537ae4be1e162e79efad"} Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.980192 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-lkcvr"] Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.985128 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26" podStartSLOduration=91.985099639 podStartE2EDuration="1m31.985099639s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.819313768 +0000 UTC m=+111.833376237" watchObservedRunningTime="2026-01-30 00:11:47.985099639 +0000 UTC m=+111.999162098" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.985205 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" podStartSLOduration=91.985201942 podStartE2EDuration="1m31.985201942s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.879775603 +0000 UTC m=+111.893838082" watchObservedRunningTime="2026-01-30 00:11:47.985201942 +0000 UTC m=+111.999264401" Jan 30 00:11:47 crc kubenswrapper[5119]: I0130 00:11:47.986531 5119 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-69b85846b6-x62sl" podStartSLOduration=91.986513713 podStartE2EDuration="1m31.986513713s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.858250295 +0000 UTC m=+111.872312754" watchObservedRunningTime="2026-01-30 00:11:47.986513713 +0000 UTC m=+112.000576172" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.008114 5119 patch_prober.go:28] interesting pod/console-operator-67c89758df-l9h6v container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.008161 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" podUID="4b0f3995-a41a-43fe-9811-2a31b2e34c81" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.055907 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.056035 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.556008887 +0000 UTC m=+112.570071346 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.056194 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.056577 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.55655561 +0000 UTC m=+112.570618069 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.156912 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.157123 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.6570962 +0000 UTC m=+112.671158659 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157266 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157306 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-mountpoint-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157330 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-socket-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157534 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-csi-data-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157632 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" 
(UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-plugins-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157653 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-registration-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.157705 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbq8f\" (UniqueName: \"kubernetes.io/projected/6de3ce1c-697b-44eb-87b5-7365bab6606b-kube-api-access-xbq8f\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.159079 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.659060618 +0000 UTC m=+112.673123077 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.217642 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43934: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.258778 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.258946 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.758921702 +0000 UTC m=+112.772984161 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.259453 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.259604 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-mountpoint-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.259776 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.759767542 +0000 UTC m=+112.773830001 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.259821 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-mountpoint-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260110 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-socket-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260238 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-socket-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260310 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-csi-data-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: 
\"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260351 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-plugins-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260374 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-registration-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260464 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xbq8f\" (UniqueName: \"kubernetes.io/projected/6de3ce1c-697b-44eb-87b5-7365bab6606b-kube-api-access-xbq8f\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260536 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-registration-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260578 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-csi-data-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.260626 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/6de3ce1c-697b-44eb-87b5-7365bab6606b-plugins-dir\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.290177 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbq8f\" (UniqueName: \"kubernetes.io/projected/6de3ce1c-697b-44eb-87b5-7365bab6606b-kube-api-access-xbq8f\") pod \"csi-hostpathplugin-pkvpm\" (UID: \"6de3ce1c-697b-44eb-87b5-7365bab6606b\") " pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.298532 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.305576 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43946: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.362213 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.362354 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.862332862 +0000 UTC m=+112.876395331 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.363120 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.363384 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.863374867 +0000 UTC m=+112.877437326 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.393024 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43952: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.418051 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43958: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.436692 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" event={"ID":"1856af5f-e69c-4379-a007-f30a582f28d1","Type":"ContainerStarted","Data":"249a41813581c1777f8ce340abc76fb3ea0f24c52c207c9cc8f0c51c57341a66"} Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.436749 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-72l9z"] Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.437947 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.438792 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.446734 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-sysctl-allowlist\"" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.447227 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43964: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.463900 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.464158 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.964135823 +0000 UTC m=+112.978198282 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.464209 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.464662 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.964652995 +0000 UTC m=+112.978715454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.536993 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43966: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: W0130 00:11:48.543650 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6de3ce1c_697b_44eb_87b5_7365bab6606b.slice/crio-e58b76255ca4fc2271f1ad3c3255bfe068a9fe061842072ef1f760d9f5a44137 WatchSource:0}: Error finding container e58b76255ca4fc2271f1ad3c3255bfe068a9fe061842072ef1f760d9f5a44137: Status 404 returned error can't find the container with id e58b76255ca4fc2271f1ad3c3255bfe068a9fe061842072ef1f760d9f5a44137 Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.564842 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.565182 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32f760a1-1874-479d-810f-ac7785c7b94d-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.565214 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32f760a1-1874-479d-810f-ac7785c7b94d-tuning-conf-dir\") pod 
\"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.565231 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/32f760a1-1874-479d-810f-ac7785c7b94d-ready\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.565278 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgwxn\" (UniqueName: \"kubernetes.io/projected/32f760a1-1874-479d-810f-ac7785c7b94d-kube-api-access-wgwxn\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.565369 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.065355019 +0000 UTC m=+113.079417478 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.566314 5119 generic.go:358] "Generic (PLEG): container finished" podID="f8304fa9-6220-4bf9-a154-177628944fc1" containerID="1554609dd9b6a26b95e7718227b82984ae4a6d98cdf3e79174eb0c6a62876e2e" exitCode=0 Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666295 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32f760a1-1874-479d-810f-ac7785c7b94d-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666341 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32f760a1-1874-479d-810f-ac7785c7b94d-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666367 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/32f760a1-1874-479d-810f-ac7785c7b94d-ready\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666466 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-wgwxn\" (UniqueName: \"kubernetes.io/projected/32f760a1-1874-479d-810f-ac7785c7b94d-kube-api-access-wgwxn\") pod 
\"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666603 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32f760a1-1874-479d-810f-ac7785c7b94d-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666793 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.666958 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/32f760a1-1874-479d-810f-ac7785c7b94d-ready\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.667141 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.16712546 +0000 UTC m=+113.181187919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.667251 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32f760a1-1874-479d-810f-ac7785c7b94d-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.688791 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgwxn\" (UniqueName: \"kubernetes.io/projected/32f760a1-1874-479d-810f-ac7785c7b94d-kube-api-access-wgwxn\") pod \"cni-sysctl-allowlist-ds-lkcvr\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") " pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.725260 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43976: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.768042 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 
30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.768222 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.268193503 +0000 UTC m=+113.282255962 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.768511 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.768844 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.268830368 +0000 UTC m=+113.282892837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.783736 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.869516 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.869694 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.369665986 +0000 UTC m=+113.383728445 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.869909 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.870194 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.370182699 +0000 UTC m=+113.384245158 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.971137 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.971278 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.471261462 +0000 UTC m=+113.485323921 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5119]: I0130 00:11:48.971456 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:48 crc kubenswrapper[5119]: E0130 00:11:48.971689 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.471682732 +0000 UTC m=+113.485745191 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.072107 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43988: no serving certificate available for the kubelet" Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.072744 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.072899 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.572879149 +0000 UTC m=+113.586941618 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.072955 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.073281 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.573270338 +0000 UTC m=+113.587332797 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.173494 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.173682 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.673654955 +0000 UTC m=+113.687717414 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.174124 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.174749 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.674733001 +0000 UTC m=+113.688795460 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.275568 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.275714 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.775692512 +0000 UTC m=+113.789754971 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.276158 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.276489 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.776478441 +0000 UTC m=+113.790540900 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.377122 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.377443 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.877330319 +0000 UTC m=+113.891392778 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.479604 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.480115 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.980094733 +0000 UTC m=+113.994157192 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.580668 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.580825 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.080801728 +0000 UTC m=+114.094864187 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.581010 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.581300 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.081288199 +0000 UTC m=+114.095350658 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.681869 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.682015 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.181994814 +0000 UTC m=+114.196057273 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.682228 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.682612 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.182602249 +0000 UTC m=+114.196664708 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.767292 5119 ???:1] "http: TLS handshake error from 192.168.126.11:43992: no serving certificate available for the kubelet" Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.783225 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.783322 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.283303733 +0000 UTC m=+114.297366182 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.783527 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.783794 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.283787915 +0000 UTC m=+114.297850374 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.884865 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.885080 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.385056123 +0000 UTC m=+114.399118592 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.885330 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.885643 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.385635497 +0000 UTC m=+114.399697956 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.986926 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.987139 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.48710608 +0000 UTC m=+114.501168549 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5119]: I0130 00:11:49.987291 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:49 crc kubenswrapper[5119]: E0130 00:11:49.987643 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.487630232 +0000 UTC m=+114.501692691 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.088564 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.088701 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.588682215 +0000 UTC m=+114.602744664 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.088867 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.089186 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.589178617 +0000 UTC m=+114.603241076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.190569 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.190930 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.690912767 +0000 UTC m=+114.704975226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.264291 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.267705 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"openshift-service-ca.crt\"" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.267711 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"default-dockercfg-9pgs7\"" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.268799 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"canary-serving-cert\"" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270870 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270910 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" event={"ID":"b7507dbb-ef53-4022-a311-17ba6d8b37a8","Type":"ContainerStarted","Data":"470b2591c72dca4ee53d67beee652582db5d81ba40a81328985d4ea0b3394931"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270931 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" event={"ID":"efe05509-fd88-45b1-8393-fad7b7758f9b","Type":"ContainerStarted","Data":"5bc31df1e4e1970c91a9e328fd31f38be850fb0e78db070fff15248c0b0c5dc0"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270947 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270957 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-w67fs"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270967 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkvpm"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270978 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb" event={"ID":"27037d8c-3db2-4e66-8680-7804a89fd519","Type":"ContainerStarted","Data":"5556e07fed7e91cc6196a80a8e1a6f4cbd4d512bbecf56efc0bb7b13ab3011a4"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.270990 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-nts9m" event={"ID":"ff01653b-8f2a-47a1-ae0c-0ac878c25570","Type":"ContainerStarted","Data":"2931176e63698b597fb7d7b475846b4ee8596e28f4e8f58aaab85d8027d44f04"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.271002 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" event={"ID":"67a37ab3-c501-4e8d-96f2-907b849e3856","Type":"ContainerStarted","Data":"d8e4673e596fdee2aa3c39c6a738c283b32bad1a393fdcfe15b65d1b30490417"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.271013 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-tqjvr"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.271020 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"kube-root-ca.crt\"" Jan 30 00:11:50 crc kubenswrapper[5119]: 
I0130 00:11:50.293693 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbc76\" (UniqueName: \"kubernetes.io/projected/a5f0c418-1e6f-42c7-b846-448cd019dcbf-kube-api-access-vbc76\") pod \"ingress-canary-72l9z\" (UID: \"a5f0c418-1e6f-42c7-b846-448cd019dcbf\") " pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.293967 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a5f0c418-1e6f-42c7-b846-448cd019dcbf-cert\") pod \"ingress-canary-72l9z\" (UID: \"a5f0c418-1e6f-42c7-b846-448cd019dcbf\") " pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.294133 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.294784 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.794737636 +0000 UTC m=+114.808800155 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.336380 5119 patch_prober.go:28] interesting pod/apiserver-8596bd845d-l5nr4 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.336466 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" podUID="4363b09f-d35c-47ec-b96d-c9437ccf2206" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.395472 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.395656 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.895633825 +0000 UTC m=+114.909696284 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.395855 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vbc76\" (UniqueName: \"kubernetes.io/projected/a5f0c418-1e6f-42c7-b846-448cd019dcbf-kube-api-access-vbc76\") pod \"ingress-canary-72l9z\" (UID: \"a5f0c418-1e6f-42c7-b846-448cd019dcbf\") " pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.395968 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a5f0c418-1e6f-42c7-b846-448cd019dcbf-cert\") pod \"ingress-canary-72l9z\" (UID: \"a5f0c418-1e6f-42c7-b846-448cd019dcbf\") " pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.396051 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.396419 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.896401364 +0000 UTC m=+114.910463823 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.402737 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a5f0c418-1e6f-42c7-b846-448cd019dcbf-cert\") pod \"ingress-canary-72l9z\" (UID: \"a5f0c418-1e6f-42c7-b846-448cd019dcbf\") " pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.413148 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbc76\" (UniqueName: \"kubernetes.io/projected/a5f0c418-1e6f-42c7-b846-448cd019dcbf-kube-api-access-vbc76\") pod \"ingress-canary-72l9z\" (UID: \"a5f0c418-1e6f-42c7-b846-448cd019dcbf\") " pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.477308 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.477376 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.496922 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.497718 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.997678322 +0000 UTC m=+115.011740781 (durationBeforeRetry 500ms). 
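
The "Probe failed" entries around here (startup probe on the oauth-apiserver pod, readiness probe on the console downloads pod) all report "connect: connection refused", which just means the container process is not listening yet during bring-up. A minimal sketch of the check behind an HTTP probe, for illustration only and not the kubelet's prober code; the URL is copied from the log and the timeout is an assumption. The kubelet treats HTTP statuses from 200 up to (but not including) 400 as success.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP GET and classifies the result the way the log
// entries do: a transport error (e.g. connection refused) or a status
// outside 200-399 counts as a failed probe.
func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second} // assumption: illustrative timeout
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused" while the server starts
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// URL taken from the downloads-747b44746d-nts9m readiness probe above.
	if err := probe("http://10.217.0.24:8080/"); err != nil {
		fmt.Println("probe failed:", err)
	} else {
		fmt.Println("probe ok")
	}
}
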
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.561860 5119 patch_prober.go:28] interesting pod/console-64d44f6ddf-fjtqb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.561928 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-console/console-64d44f6ddf-fjtqb" podUID="80f4b56b-65a5-40d2-9a12-0465c0ff492c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.583716 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-72l9z" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588856 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588905 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" event={"ID":"6e32c931-da87-4115-8257-185ed217e76a","Type":"ContainerStarted","Data":"fe0907d28bfe4b6044528c5dac77d6d76289eec13cc0aad81e416d0aae0b7485"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588937 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588953 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" event={"ID":"e627abc4-228d-4133-8f48-393e979d9826","Type":"ContainerStarted","Data":"81fd8f10a2d32aa052b0316b43938ead36bbe778dae37e2c029d9767023f02d9"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588973 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" event={"ID":"4363b09f-d35c-47ec-b96d-c9437ccf2206","Type":"ContainerStarted","Data":"3f8baa7b03eaa6ad784f720629ffe0f1018020f899a47fc5aa512b12ab1031f2"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588986 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.588998 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-w8qxl"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589016 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589028 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-72l9z"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589044 5119 kubelet.go:2658] "SyncLoop (probe)" 
probe="readiness" status="not ready" pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589055 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" event={"ID":"790922bf-8215-4d59-a46e-9035f3be8e32","Type":"ContainerStarted","Data":"6ce44192cbab79d274dc6829ea4fdb72ac365abcde5461fccdb9a84e0f259666"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589070 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-74545575db-twjcj"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589084 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589094 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-69db94689b-xxmhj"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589107 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" event={"ID":"16e266d0-7218-4013-a778-df876d9f8270","Type":"ContainerStarted","Data":"c7c5c86917c049f8c3b55e74da3f9122e9e9464923d47b7355320032a6b35b3e"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589119 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" event={"ID":"682184a8-29d6-4081-99ac-9d5989e169ab","Type":"ContainerStarted","Data":"7062752b599fc84632b9c7d5f6dcca29c48e7d9ae1365d8ce816e7b55627e0dd"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589132 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589150 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" event={"ID":"67e5ee6c-a546-4a02-84ad-b736baa67181","Type":"ContainerStarted","Data":"52aca634db68c22edbf2a3339b85291ed152ed2e679d7e55180c6095fbaed225"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589171 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" event={"ID":"04691874-6d27-40cd-a0c5-b5b3075a3327","Type":"ContainerStarted","Data":"24bb7d17a4c76dbf399b7caff72d1b7a16af2bf4bb100747b11d73b138f9e0f6"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589182 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj" event={"ID":"4ffa4428-a504-40e8-9e04-5b8547bd6875","Type":"ContainerStarted","Data":"d5d04ce44e87eb24f323a8b1d11307163e0c5ca224751696d9cc53888c5d5314"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589193 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn" event={"ID":"f6110d7d-2373-491a-89f6-6e2068b759fe","Type":"ContainerStarted","Data":"d9cca3fc611c41cb6815886beccd0cdf25085750076b279299149b93721a048d"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589205 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" 
event={"ID":"695c79c0-2a93-4c7c-8bf5-660b128f6581","Type":"ContainerStarted","Data":"efb04699c76059403ab6e175bcbf45ee8c2b2b172be0d851d7ba75ac46c7610d"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589224 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7" event={"ID":"9ed3b66f-12b9-4481-b6be-2d259c030348","Type":"ContainerStarted","Data":"a478593a9483803bf9f4c09aed5b4fe37e3b27520320b09b7a6bd8d0434cf0f4"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589245 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" event={"ID":"2888bde6-bdde-4277-b478-2557c52cd1e2","Type":"ContainerStarted","Data":"9c03ded41e80dabdc6594d5b51899bd1464b7c38ea11fa380aef358cc3268974"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589258 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-74545575db-twjcj" event={"ID":"db680431-c513-449a-a2d0-2df450dbbb9c","Type":"ContainerStarted","Data":"59d8d168bb907994ebc2dde0bec3caab309a81aace812e9b92c80bdc2d29bb68"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589269 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7" event={"ID":"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb","Type":"ContainerStarted","Data":"c838b600ec88ba4e3ac27a0bd752cf64a98f8f5c892b81b53de10a14eb052dc9"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589290 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" event={"ID":"cb5e6bb2-b3b7-43b9-aae8-2385735815cd","Type":"ContainerStarted","Data":"b3efb0d86d9db5a5d53bcd138bf36fb9c3c40d3e8d43936115b72f2748d52d0d"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589302 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29495520-tvgs8"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589316 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589332 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589343 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589411 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" event={"ID":"1375f37f-3c35-4d19-ade4-559ffe4d22aa","Type":"ContainerStarted","Data":"c3bf3d6e506674cb2575968407354cc87be946392fa954547017a8cb09e9fa82"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589427 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-755bb95488-sxw6b"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589438 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-9ddfb9f55-9n8tq"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589448 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" 
event={"ID":"1758db3d-11c0-41e8-b35c-d9d296ef3b54","Type":"ContainerStarted","Data":"bebc877525f0c635675fc4c0a3183ed1e6b539b3e805ed83d831fe59d4f23bbd"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589459 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" event={"ID":"0d4dbd80-a4d6-46fb-a310-97df8cf65bc9","Type":"ContainerStarted","Data":"99f641e535793ef058e61120f6f83becddaecfd8d28c82a877b2b98c00062296"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589471 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-w8qxl" event={"ID":"b82c2f8b-0956-4050-b980-2973d235692b","Type":"ContainerStarted","Data":"055b44d186809a17fde6dbdcdaf9a216aa0c358ab4725290027737faec7fdc6d"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589483 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-nk2l2"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589497 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64d44f6ddf-fjtqb"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589511 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-67c89758df-l9h6v"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589524 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" event={"ID":"f8304fa9-6220-4bf9-a154-177628944fc1","Type":"ContainerDied","Data":"1554609dd9b6a26b95e7718227b82984ae4a6d98cdf3e79174eb0c6a62876e2e"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589540 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" event={"ID":"4e4d65fd-a484-4711-a91f-cd04e6dfa00a","Type":"ContainerStarted","Data":"8386b1fe6263b83842590979af38f70020e5f995498033b56a905250d6106527"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589554 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-5777786469-7wrgd"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589570 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" event={"ID":"b7507dbb-ef53-4022-a311-17ba6d8b37a8","Type":"ContainerStarted","Data":"0b8c1343a9a1c75cbf85f94e8ff936254eec1b60ec8bb23d395eaaad09236448"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589589 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589604 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" event={"ID":"32f760a1-1874-479d-810f-ac7785c7b94d","Type":"ContainerStarted","Data":"bc6f4d5edc3898b52aeedf570bed40613d9eaf302be07882b5e49e0c97178643"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589619 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-7f5c659b84-xnr26"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589630 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" 
event={"ID":"6de3ce1c-697b-44eb-87b5-7365bab6606b","Type":"ContainerStarted","Data":"e58b76255ca4fc2271f1ad3c3255bfe068a9fe061842072ef1f760d9f5a44137"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589643 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589654 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" event={"ID":"6e32c931-da87-4115-8257-185ed217e76a","Type":"ContainerStarted","Data":"fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589664 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-69b85846b6-x62sl"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589675 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-747b44746d-nts9m"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589684 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-86c45576b9-fp4qb"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589701 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" event={"ID":"efe05509-fd88-45b1-8393-fad7b7758f9b","Type":"ContainerStarted","Data":"78bcb1d25b9c6432263f1921198c6faade4d0d677147b73a65b78e08260caa72"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589712 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589730 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" event={"ID":"1856af5f-e69c-4379-a007-f30a582f28d1","Type":"ContainerStarted","Data":"cb3ec05cba7d2ea390474126b1ce3aa0df746e816aa5e852c44bccfdf045fc70"} Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.589742 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.590131 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.592302 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878" podStartSLOduration=94.592292349 podStartE2EDuration="1m34.592292349s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:50.488331047 +0000 UTC m=+114.502393506" watchObservedRunningTime="2026-01-30 00:11:50.592292349 +0000 UTC m=+114.606354808" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.595165 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"node-bootstrapper-token\"" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.595552 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-tls\"" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.597133 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-dockercfg-dzw6b\"" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.602459 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.604371 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.610429 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.611002 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.110981299 +0000 UTC m=+115.125043758 (durationBeforeRetry 500ms). 
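
The pod_startup_latency_tracker entries above report podStartSLOduration as the gap between podCreationTimestamp and the observed running time; firstStartedPulling/lastFinishedPulling of "0001-01-01 00:00:00 +0000 UTC" are Go's zero time.Time, meaning no image pull was recorded (the images were already present on the node). A small sketch reproducing the arithmetic, with the two timestamps copied verbatim from the kube-scheduler-operator entry:

package main

import (
	"fmt"
	"log"
	"time"
)

func main() {
	// This layout matches time.Time's default String() output, which is the
	// format these log fields use; Go accepts the fractional seconds on parse.
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, err := time.Parse(layout, "2026-01-30 00:10:16 +0000 UTC")
	if err != nil {
		log.Fatal(err)
	}
	observed, err := time.Parse(layout, "2026-01-30 00:11:50.592292349 +0000 UTC")
	if err != nil {
		log.Fatal(err)
	}
	// Prints 1m34.592292349s, i.e. the podStartSLOduration=94.592292349 above.
	fmt.Println(observed.Sub(created))
}
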
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.613363 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.621708 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-799b87ffcd-bzgsm"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.651357 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-67c89758df-l9h6v" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.651956 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-2c878"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.684473 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-w67fs"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.687943 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.695474 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.695543 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.711288 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.711462 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.711688 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/dc671644-a8ef-4980-b505-388484e4645a-node-bootstrap-token\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.711817 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.211788866 +0000 UTC m=+115.225851335 (durationBeforeRetry 500ms). 
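
The reflector "Caches populated" lines above (node-bootstrapper-token, machine-config-server-tls, and so on) come from client-go informers: the kubelet runs a reflector per Secret/ConfigMap a pod references and logs once the local cache has synced, after which volume mounts can read the object without hitting the API server each time. A minimal informer of the same shape, watching Secrets in one namespace; the kubeconfig path is an assumption and the namespace is taken from the log.

package main

import (
	"fmt"
	"log"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // assumption
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// One shared factory, scoped to the namespace seen in the reflector lines.
	factory := informers.NewSharedInformerFactoryWithOptions(cs, 30*time.Second,
		informers.WithNamespace("openshift-machine-config-operator"))
	informer := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	// WaitForCacheSync returning true is the moment the kubelet's log would
	// report "Caches populated" for this reflector.
	if !cache.WaitForCacheSync(stop, informer.HasSynced) {
		log.Fatal("cache never synced")
	}
	fmt.Println("caches populated")
}
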
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.712246 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.712692 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/dc671644-a8ef-4980-b505-388484e4645a-certs\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.712758 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfgs6\" (UniqueName: \"kubernetes.io/projected/dc671644-a8ef-4980-b505-388484e4645a-kube-api-access-dfgs6\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.715338 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.215319931 +0000 UTC m=+115.229382470 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.743894 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-xmw98"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.763759 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.763706 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" podStartSLOduration=93.763685266 podStartE2EDuration="1m33.763685266s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.658932421 +0000 UTC m=+110.672994880" watchObservedRunningTime="2026-01-30 00:11:50.763685266 +0000 UTC m=+114.777747725" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.769952 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-755bb95488-sxw6b" podStartSLOduration=93.769932476 podStartE2EDuration="1m33.769932476s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.68586617 +0000 UTC m=+110.699928639" watchObservedRunningTime="2026-01-30 00:11:50.769932476 +0000 UTC m=+114.783994935" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.775291 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.797717 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.804790 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.814310 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.814828 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/dc671644-a8ef-4980-b505-388484e4645a-node-bootstrap-token\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.815044 5119 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.315017852 +0000 UTC m=+115.329080311 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.815328 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.815674 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/dc671644-a8ef-4980-b505-388484e4645a-certs\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.815773 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-dfgs6\" (UniqueName: \"kubernetes.io/projected/dc671644-a8ef-4980-b505-388484e4645a-kube-api-access-dfgs6\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.819488 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.319450378 +0000 UTC m=+115.333512937 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.827737 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/dc671644-a8ef-4980-b505-388484e4645a-certs\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.828623 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/dc671644-a8ef-4980-b505-388484e4645a-node-bootstrap-token\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.856343 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfgs6\" (UniqueName: \"kubernetes.io/projected/dc671644-a8ef-4980-b505-388484e4645a-kube-api-access-dfgs6\") pod \"machine-config-server-tqjvr\" (UID: \"dc671644-a8ef-4980-b505-388484e4645a\") " pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.876104 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.895531 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-74545575db-twjcj"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.917944 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:50 crc kubenswrapper[5119]: E0130 00:11:50.918985 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.418969364 +0000 UTC m=+115.433031823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.923932 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.949367 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-tqjvr" Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.950204 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-69db94689b-xxmhj"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.965934 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-w5d5n" podStartSLOduration=94.965916125 podStartE2EDuration="1m34.965916125s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.841963673 +0000 UTC m=+111.856026142" watchObservedRunningTime="2026-01-30 00:11:50.965916125 +0000 UTC m=+114.979978594" Jan 30 00:11:50 crc kubenswrapper[5119]: W0130 00:11:50.974941 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc671644_a8ef_4980_b505_388484e4645a.slice/crio-b3fb98ba48fa0d0b6c32ebbf7a1bd7f44dd9f39a22e5f245a5da520fd1d08119 WatchSource:0}: Error finding container b3fb98ba48fa0d0b6c32ebbf7a1bd7f44dd9f39a22e5f245a5da520fd1d08119: Status 404 returned error can't find the container with id b3fb98ba48fa0d0b6c32ebbf7a1bd7f44dd9f39a22e5f245a5da520fd1d08119 Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.978674 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-w8qxl"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.984490 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkvpm"] Jan 30 00:11:50 crc kubenswrapper[5119]: I0130 00:11:50.986654 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-r97db" podStartSLOduration=94.986638844 podStartE2EDuration="1m34.986638844s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:49.18755416 +0000 UTC m=+113.201616609" watchObservedRunningTime="2026-01-30 00:11:50.986638844 +0000 UTC m=+115.000701313" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.000026 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-r7pzb" podStartSLOduration=95.000015226 podStartE2EDuration="1m35.000015226s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:50.655362778 +0000 UTC m=+114.669425237" watchObservedRunningTime="2026-01-30 00:11:51.000015226 +0000 UTC m=+115.014077695" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.001897 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-jx5df" podStartSLOduration=95.001890311 podStartE2EDuration="1m35.001890311s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:50.671873875 +0000 UTC m=+114.685936334" watchObservedRunningTime="2026-01-30 00:11:51.001890311 +0000 UTC m=+115.015952770" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.004465 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podStartSLOduration=95.004458863 podStartE2EDuration="1m35.004458863s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:50.725577738 +0000 UTC m=+114.739640217" watchObservedRunningTime="2026-01-30 00:11:51.004458863 +0000 UTC m=+115.018521422" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.019989 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klllr\" (UniqueName: \"kubernetes.io/projected/f8304fa9-6220-4bf9-a154-177628944fc1-kube-api-access-klllr\") pod \"f8304fa9-6220-4bf9-a154-177628944fc1\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.020339 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8304fa9-6220-4bf9-a154-177628944fc1-config-volume\") pod \"f8304fa9-6220-4bf9-a154-177628944fc1\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.020427 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8304fa9-6220-4bf9-a154-177628944fc1-secret-volume\") pod \"f8304fa9-6220-4bf9-a154-177628944fc1\" (UID: \"f8304fa9-6220-4bf9-a154-177628944fc1\") " Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.020845 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.020959 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8304fa9-6220-4bf9-a154-177628944fc1-config-volume" (OuterVolumeSpecName: "config-volume") pod "f8304fa9-6220-4bf9-a154-177628944fc1" (UID: "f8304fa9-6220-4bf9-a154-177628944fc1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.021202 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.521188635 +0000 UTC m=+115.535251094 (durationBeforeRetry 500ms). 
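
A few entries above, the util.go "No sandbox for pod can be found. Need to start a new one" messages are the normal first-start path for a pod, and the manager.go warning about a 404 for a crio-* container ID is a typically benign race: cadvisor notices the new cgroup before the runtime can report the container. A hedged sketch of asking the CRI runtime (CRI-O here, per the crio-* cgroup names) what sandboxes exist for one of these pods; the socket path is an assumption, the pod name is taken from the log, and the label key is the standard one the kubelet applies to sandboxes.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// Assumption: CRI-O's default socket location.
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	rt := runtimeapi.NewRuntimeServiceClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Filter sandboxes by the pod name the kubelet said it needed a sandbox for.
	resp, err := rt.ListPodSandbox(ctx, &runtimeapi.ListPodSandboxRequest{
		Filter: &runtimeapi.PodSandboxFilter{
			LabelSelector: map[string]string{
				"io.kubernetes.pod.name": "machine-config-server-tqjvr",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, sb := range resp.Items {
		fmt.Printf("sandbox %s state=%s\n", sb.Id, sb.State)
	}
}
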
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.024369 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8304fa9-6220-4bf9-a154-177628944fc1-kube-api-access-klllr" (OuterVolumeSpecName: "kube-api-access-klllr") pod "f8304fa9-6220-4bf9-a154-177628944fc1" (UID: "f8304fa9-6220-4bf9-a154-177628944fc1"). InnerVolumeSpecName "kube-api-access-klllr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.024972 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8304fa9-6220-4bf9-a154-177628944fc1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f8304fa9-6220-4bf9-a154-177628944fc1" (UID: "f8304fa9-6220-4bf9-a154-177628944fc1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.069603 5119 ???:1] "http: TLS handshake error from 192.168.126.11:48024: no serving certificate available for the kubelet" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.122046 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.122221 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.622187757 +0000 UTC m=+115.636250216 (durationBeforeRetry 500ms). 
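
The "http: TLS handshake error ... no serving certificate available for the kubelet" entry above occurs while the kubelet's serving-certificate CSR is still unapproved, so connections to the kubelet's HTTPS port fail; on a bootstrapping node this is transient and clears once the CSR is approved and issued. A hedged diagnostic sketch listing CSRs for the kubelet-serving signer and showing which are still pending; the kubeconfig path is an assumption.

package main

import (
	"context"
	"fmt"
	"log"

	certsv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // assumption
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	csrs, err := cs.CertificatesV1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, csr := range csrs.Items {
		// Only the kubelet-serving signer matters for the handshake error above.
		if csr.Spec.SignerName != certsv1.KubeletServingSignerName {
			continue
		}
		status := "Pending"
		for _, c := range csr.Status.Conditions {
			if c.Type == certsv1.CertificateApproved {
				status = "Approved"
			}
		}
		fmt.Printf("%s: %s\n", csr.Name, status)
	}
}
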
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.122621 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.122799 5119 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8304fa9-6220-4bf9-a154-177628944fc1-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.122822 5119 reconciler_common.go:299] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8304fa9-6220-4bf9-a154-177628944fc1-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.122836 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-klllr\" (UniqueName: \"kubernetes.io/projected/f8304fa9-6220-4bf9-a154-177628944fc1-kube-api-access-klllr\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.123070 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.623053718 +0000 UTC m=+115.637116177 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.127792 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-72l9z"] Jan 30 00:11:51 crc kubenswrapper[5119]: W0130 00:11:51.132610 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5f0c418_1e6f_42c7_b846_448cd019dcbf.slice/crio-57a747e81bb99bbdbcc5090e14da6db6ebfb9e77f5c42cbd567b04c145064c4a WatchSource:0}: Error finding container 57a747e81bb99bbdbcc5090e14da6db6ebfb9e77f5c42cbd567b04c145064c4a: Status 404 returned error can't find the container with id 57a747e81bb99bbdbcc5090e14da6db6ebfb9e77f5c42cbd567b04c145064c4a Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.223558 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.223713 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.723676451 +0000 UTC m=+115.737738910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.224144 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.224472 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.724457319 +0000 UTC m=+115.738519778 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.325604 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.325730 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.825714427 +0000 UTC m=+115.839776886 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.325845 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.326096 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.826087816 +0000 UTC m=+115.840150265 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.427886 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.428032 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.9280063 +0000 UTC m=+115.942068759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.428198 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.428516 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.928504162 +0000 UTC m=+115.942566621 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.471258 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.471327 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.529408 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.529583 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.029562025 +0000 UTC m=+116.043624484 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.529682 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.529956 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.029949034 +0000 UTC m=+116.044011493 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.530049 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw"
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.531412 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.531481 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.593802 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc" event={"ID":"f8304fa9-6220-4bf9-a154-177628944fc1","Type":"ContainerDied","Data":"14a6018aca423eac1c27c23f9f0b95f486d8a25e88951f032246a1c32e74dd9a"}
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.593839 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14a6018aca423eac1c27c23f9f0b95f486d8a25e88951f032246a1c32e74dd9a"
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.593836 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-8cltc"
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.595118 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" event={"ID":"6f3ae473-7d01-46db-ad58-f27062e82346","Type":"ContainerStarted","Data":"733896c2347f2a08d05ccc343d944a07f019a539d2907493dffb170241232a03"}
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.596972 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" event={"ID":"c423b388-bcb0-40a3-9889-9ec109779849","Type":"ContainerStarted","Data":"063600912f4959a9882c50f264b223c86842088ee1ac0c45dfea1277a737e0b8"}
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.597787 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-72l9z" event={"ID":"a5f0c418-1e6f-42c7-b846-448cd019dcbf","Type":"ContainerStarted","Data":"57a747e81bb99bbdbcc5090e14da6db6ebfb9e77f5c42cbd567b04c145064c4a"}
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.598638 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-tqjvr" event={"ID":"dc671644-a8ef-4980-b505-388484e4645a","Type":"ContainerStarted","Data":"b3fb98ba48fa0d0b6c32ebbf7a1bd7f44dd9f39a22e5f245a5da520fd1d08119"}
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.630754 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.631102 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.131075139 +0000 UTC m=+116.145137608 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.731920 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.732273 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.232257285 +0000 UTC m=+116.246319744 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.833072 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.833387 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.333316298 +0000 UTC m=+116.347378787 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.834224 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.834626 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.334607439 +0000 UTC m=+116.348669898 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.935405 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.935738 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.435688883 +0000 UTC m=+116.449751342 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:51 crc kubenswrapper[5119]: I0130 00:11:51.936009 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:51 crc kubenswrapper[5119]: E0130 00:11:51.936479 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.436467882 +0000 UTC m=+116.450530341 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.037617 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.037767 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.53773983 +0000 UTC m=+116.551802289 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.037960 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.038343 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.538329534 +0000 UTC m=+116.552391993 (durationBeforeRetry 500ms). 
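
The records above are one tight retry loop: the replacement image-registry pod cannot mount PVC pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2, and the terminated pod 9e9b5059-1b3e-4067-a63d-2952cbe863af cannot release it, because the kubevirt.io.hostpath-provisioner CSI driver has not yet registered with this kubelet, so every MountVolume/UnmountVolume attempt fails fast and is requeued. A minimal sketch for tallying that churn from a capture like this one, fed on stdin (an editor-added helper, not cluster tooling; the regexp simply matches the record shape visible above):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"regexp"
    )

    // Counts, per volume, how many times the kubelet requeued a failed volume
    // operation, by matching the nestedpendingoperations records shown above.
    func main() {
    	re := regexp.MustCompile(`nestedpendingoperations\.go:\d+\] Operation for "\{volumeName:(\S+)`)
    	counts := map[string]int{}
    	sc := bufio.NewScanner(os.Stdin)
    	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet records can be very long
    	for sc.Scan() {
    		if m := re.FindStringSubmatch(sc.Text()); m != nil {
    			counts[m[1]]++
    		}
    	}
    	if err := sc.Err(); err != nil {
    		fmt.Fprintln(os.Stderr, "scan:", err)
    	}
    	for vol, n := range counts {
    		fmt.Printf("%6d retries scheduled for %s\n", n, vol)
    	}
    }

Run against this capture it would report a single large count for kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2, which is exactly what the stretch of log above shows by eye.
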
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.067599 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"] Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.069487 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f8304fa9-6220-4bf9-a154-177628944fc1" containerName="collect-profiles" Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.069524 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8304fa9-6220-4bf9-a154-177628944fc1" containerName="collect-profiles" Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.069736 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="f8304fa9-6220-4bf9-a154-177628944fc1" containerName="collect-profiles" Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.122716 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.122831 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.138947 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.139474 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.639418698 +0000 UTC m=+116.653481157 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.242484 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.245428 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.74541267 +0000 UTC m=+116.759475129 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.343091 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.343298 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.843270376 +0000 UTC m=+116.857332835 (durationBeforeRetry 500ms). 
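
Each E-record above carries its own retry schedule: the "No retries permitted until" instant is the failure time plus the printed durationBeforeRetry (a constant 500ms throughout this stretch of the log), and the m=+... suffix is Go's monotonic-clock reading for the same instant, i.e. seconds since the kubelet process started. A quick check of that arithmetic against the record at 00:11:52.139474, as a sketch; the year is inferred from the retry deadline, since the klog prefix omits it:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Verifies the backoff arithmetic in one record above: the "No retries
    // permitted until" instant is the failure time plus durationBeforeRetry.
    func main() {
    	// Go's default time.Time format, as printed in the log.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	failed, err := time.Parse(layout, "2026-01-30 00:11:52.139474 +0000 UTC") // E0130 00:11:52.139474
    	if err != nil {
    		panic(err)
    	}
    	notBefore, err := time.Parse(layout, "2026-01-30 00:11:52.639418698 +0000 UTC") // "No retries permitted until ..."
    	if err != nil {
    		panic(err)
    	}
    	// Prints 499.944698ms: the 500ms backoff, computed a moment before the
    	// log line itself was emitted.
    	fmt.Println(notBefore.Sub(failed))
    }
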
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.343756 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.344208 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.844201148 +0000 UTC m=+116.858263607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.411883 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"]
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.412073 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.415022 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler\"/\"kube-root-ca.crt\""
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.415628 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler\"/\"installer-sa-dockercfg-qpkss\""
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.433103 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-xcr42" podStartSLOduration=96.433085738 podStartE2EDuration="1m36.433085738s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:52.432668348 +0000 UTC m=+116.446730837" watchObservedRunningTime="2026-01-30 00:11:52.433085738 +0000 UTC m=+116.447148197"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.433937 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" podStartSLOduration=96.433932119 podStartE2EDuration="1m36.433932119s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:52.415077345 +0000 UTC m=+116.429139824" watchObservedRunningTime="2026-01-30 00:11:52.433932119 +0000 UTC m=+116.447994578"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.445292 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.445475 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.945441486 +0000 UTC m=+116.959503975 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.446506 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.449282 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.949267788 +0000 UTC m=+116.963330247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.478789 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" podStartSLOduration=95.478769928 podStartE2EDuration="1m35.478769928s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:52.476881703 +0000 UTC m=+116.490944162" watchObservedRunningTime="2026-01-30 00:11:52.478769928 +0000 UTC m=+116.492832387"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.497535 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-tnqmt" podStartSLOduration=95.4975184 podStartE2EDuration="1m35.4975184s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:52.49296667 +0000 UTC m=+116.507029129" watchObservedRunningTime="2026-01-30 00:11:52.4975184 +0000 UTC m=+116.511580859"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.531786 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.531857 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.547970 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.548141 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/516d6458-9038-4211-bc80-9bb37fc669a1-kube-api-access\") pod \"revision-pruner-6-crc\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.548233 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.04818793 +0000 UTC m=+117.062250389 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.548588 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/516d6458-9038-4211-bc80-9bb37fc669a1-kubelet-dir\") pod \"revision-pruner-6-crc\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.604025 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" event={"ID":"e627abc4-228d-4133-8f48-393e979d9826","Type":"ContainerStarted","Data":"cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"}
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.605617 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" event={"ID":"790922bf-8215-4d59-a46e-9035f3be8e32","Type":"ContainerStarted","Data":"f1d846e94d54c40dd7461788d67346f9c2657cf79b648ad2f6e2bcee182224db"}
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.607140 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" event={"ID":"67a37ab3-c501-4e8d-96f2-907b849e3856","Type":"ContainerStarted","Data":"cd763e7b3b7cd5956aa16ea7075f199b5145ee23aaa7b529ba65f82964c8fea6"}
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.608620 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" event={"ID":"16e266d0-7218-4013-a778-df876d9f8270","Type":"ContainerStarted","Data":"5b5ce5893e1d5acffcfa9016c0ce88f8a9604372636b8296e750d85aaba4d1ae"}
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.649466 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/516d6458-9038-4211-bc80-9bb37fc669a1-kubelet-dir\") pod \"revision-pruner-6-crc\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.649576 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/516d6458-9038-4211-bc80-9bb37fc669a1-kube-api-access\") pod \"revision-pruner-6-crc\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.649595 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.649598 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/516d6458-9038-4211-bc80-9bb37fc669a1-kubelet-dir\") pod \"revision-pruner-6-crc\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.649883 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.149870388 +0000 UTC m=+117.163932847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.667545 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/516d6458-9038-4211-bc80-9bb37fc669a1-kube-api-access\") pod \"revision-pruner-6-crc\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.745831 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.753638 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.754838 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.254795064 +0000 UTC m=+117.268857523 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.855742 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.856051 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.356039101 +0000 UTC m=+117.370101560 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.964681 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"]
Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.957130 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.957373 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.45734418 +0000 UTC m=+117.471406629 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.957780 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:52 crc kubenswrapper[5119]: E0130 00:11:52.958198 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.458181171 +0000 UTC m=+117.472243630 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5119]: I0130 00:11:52.964681 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"] Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.058622 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.058820 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.558792373 +0000 UTC m=+117.572854832 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.058896 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.059543 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.559534551 +0000 UTC m=+117.573597010 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.160972 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.161212 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.661160988 +0000 UTC m=+117.675223457 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.161829 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.162237 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.662219573 +0000 UTC m=+117.676282042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.262719 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.263056 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.76300609 +0000 UTC m=+117.777068549 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.364510 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.364826 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.864814161 +0000 UTC m=+117.878876620 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.466230 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.466377 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.966355775 +0000 UTC m=+117.980418234 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.466633 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.466975 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.96696534 +0000 UTC m=+117.981027799 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.546626 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:53 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:11:53 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:11:53 crc kubenswrapper[5119]: healthz check failed Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.546695 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.567437 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.567836 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.067820478 +0000 UTC m=+118.081882937 (durationBeforeRetry 500ms). 
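
The probe records in this stretch fail in two distinct ways: a refused TCP connection (the target server is not listening yet) and, for the router's startup probe, an HTTP 500 whose body lists the failing sub-checks ([-]backend-http, [-]has-synced). In both cases the prober's rule is the same: issue a GET and treat a transport error or a status code of 400 or above as a failure. A stand-alone approximation, offered as a sketch rather than the kubelet prober itself:

    package main

    import (
    	"fmt"
    	"net/http"
    	"os"
    	"time"
    )

    // Issues one HTTP GET the way the failing probes above do: a transport
    // error (e.g. "connect: connection refused") or a status of 400 or higher
    // counts as a probe failure.
    func main() {
    	if len(os.Args) != 2 {
    		fmt.Fprintln(os.Stderr, "usage: probe <url>")
    		os.Exit(2)
    	}
    	client := &http.Client{Timeout: time.Second} // probes run with short timeouts
    	resp, err := client.Get(os.Args[1])          // e.g. http://10.217.0.24:8080/
    	if err != nil {
    		fmt.Println("probe failure:", err)
    		return
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 400 {
    		fmt.Println("HTTP probe failed with statuscode:", resp.StatusCode)
    		return
    	}
    	fmt.Println("probe success:", resp.Status)
    }
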
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.615408 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn" event={"ID":"f6110d7d-2373-491a-89f6-6e2068b759fe","Type":"ContainerStarted","Data":"ba7aa20cff608bb6b2888e3fa470a1a030aca2325f027a4b28ce36d05501afab"}
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.617366 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" event={"ID":"04691874-6d27-40cd-a0c5-b5b3075a3327","Type":"ContainerStarted","Data":"d4a3dee12d1ac9bba167402b85d4281bd36269bb19b261dfdf3808bc2e5baf8d"}
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.618708 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"516d6458-9038-4211-bc80-9bb37fc669a1","Type":"ContainerStarted","Data":"dba376e6c998e3b05be1d7e943d66e3001ebb24dedbc975693025d507e68ef51"}
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.643582 5119 patch_prober.go:28] interesting pod/catalog-operator-75ff9f647d-c9qgc container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.643635 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" podUID="1856af5f-e69c-4379-a007-f30a582f28d1" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.643982 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.644026 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.644052 5119 patch_prober.go:28] interesting pod/oauth-openshift-66458b6674-w67fs container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.36:6443/healthz\": dial tcp 10.217.0.36:6443: connect: connection refused" start-of-body=
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.644088 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" podUID="6e32c931-da87-4115-8257-185ed217e76a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.36:6443/healthz\": dial tcp 10.217.0.36:6443: connect: connection refused"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.668449 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-rqpj7" podStartSLOduration=97.668431901 podStartE2EDuration="1m37.668431901s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:53.665050399 +0000 UTC m=+117.679112868" watchObservedRunningTime="2026-01-30 00:11:53.668431901 +0000 UTC m=+117.682494360"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.668733 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.669102 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.169088266 +0000 UTC m=+118.183150725 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.671862 5119 ???:1] "http: TLS handshake error from 192.168.126.11:48032: no serving certificate available for the kubelet"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.684230 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" podStartSLOduration=96.684213581 podStartE2EDuration="1m36.684213581s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:53.683728429 +0000 UTC m=+117.697790888" watchObservedRunningTime="2026-01-30 00:11:53.684213581 +0000 UTC m=+117.698276040"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.708946 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" podStartSLOduration=97.708929286 podStartE2EDuration="1m37.708929286s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:53.708105656 +0000 UTC m=+117.722168125" watchObservedRunningTime="2026-01-30 00:11:53.708929286 +0000 UTC m=+117.722991745"
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.769699 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.770807 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.270779415 +0000 UTC m=+118.284841884 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.871634 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.872044 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.372031493 +0000 UTC m=+118.386093952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.972974 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.973078 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.473060925 +0000 UTC m=+118.487123384 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5119]: I0130 00:11:53.973373 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:53 crc kubenswrapper[5119]: E0130 00:11:53.973627 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.473618458 +0000 UTC m=+118.487680917 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.075091 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.075289 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.575264356 +0000 UTC m=+118.589326815 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.075459 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.075754 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.575741867 +0000 UTC m=+118.589804326 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.176557 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.176905 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.676883591 +0000 UTC m=+118.690946050 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.277878 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.278297 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.778284083 +0000 UTC m=+118.792346542 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.379056 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.379247 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.879220963 +0000 UTC m=+118.893283422 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.379348 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.379675 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.879660573 +0000 UTC m=+118.893723102 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.480975 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.481229 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.981211918 +0000 UTC m=+118.995274377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.530598 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.533170 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:54 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:11:54 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:11:54 crc kubenswrapper[5119]: healthz check failed Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.533282 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.582701 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.583002 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.082986459 +0000 UTC m=+119.097048918 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.624074 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7" event={"ID":"9ed3b66f-12b9-4481-b6be-2d259c030348","Type":"ContainerStarted","Data":"f18f7439cac64cc15d9a409a2338794ea03cc1b2fdc170b5562c730463b96600"} Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.684195 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.684622 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.184577524 +0000 UTC m=+119.198639993 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.715433 5119 patch_prober.go:28] interesting pod/catalog-operator-75ff9f647d-c9qgc container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.715489 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc" podUID="1856af5f-e69c-4379-a007-f30a582f28d1" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.785532 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.785985 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:55.285967885 +0000 UTC m=+119.300030344 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.824125 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-9mml5" podStartSLOduration=98.824111724 podStartE2EDuration="1m38.824111724s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:54.822016173 +0000 UTC m=+118.836078632" watchObservedRunningTime="2026-01-30 00:11:54.824111724 +0000 UTC m=+118.838174183" Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.888778 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.889159 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.389125339 +0000 UTC m=+119.403187798 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:54 crc kubenswrapper[5119]: I0130 00:11:54.990010 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:54 crc kubenswrapper[5119]: E0130 00:11:54.990306 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.490294115 +0000 UTC m=+119.504356574 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.090834 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.091043 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.59100846 +0000 UTC m=+119.605070919 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.091717 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.092055 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.592043815 +0000 UTC m=+119.606106274 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.192307 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.192734 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.692707498 +0000 UTC m=+119.706769947 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.294204 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.294532 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.794517259 +0000 UTC m=+119.808579718 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.311747 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-11-crc"] Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.395685 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.395777 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.895758637 +0000 UTC m=+119.909821096 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.396179 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.396497 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.896488794 +0000 UTC m=+119.910551253 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.497047 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.497169 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.497243 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.99721498 +0000 UTC m=+120.011277439 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.497348 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.497716 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.497733 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:55.997716302 +0000 UTC m=+120.011778761 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.497816 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.497855 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.505180 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.505377 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.515122 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.538198 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:55 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:11:55 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:11:55 crc kubenswrapper[5119]: healthz check failed Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.538304 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.555531 5119 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.556761 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.599721 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.599906 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.099880071 +0000 UTC m=+120.113942530 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.600055 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.600446 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.100430535 +0000 UTC m=+120.114492994 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.700867 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.701182 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.20115567 +0000 UTC m=+120.215218129 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.701404 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.701921 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.201913488 +0000 UTC m=+120.215975947 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.721792 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.740578 5119 patch_prober.go:28] interesting pod/openshift-config-operator-5777786469-7wrgd container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.740652 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" podUID="c423b388-bcb0-40a3-9889-9ec109779849" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.752185 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.823847 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.824155 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.829596 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.329566621 +0000 UTC m=+120.343629080 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.834105 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64ebdc45-679c-4414-84fa-805ed5d07898-metrics-certs\") pod \"network-metrics-daemon-8gjq7\" (UID: \"64ebdc45-679c-4414-84fa-805ed5d07898\") " pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.837983 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-8gjq7" Jan 30 00:11:55 crc kubenswrapper[5119]: I0130 00:11:55.926807 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:55 crc kubenswrapper[5119]: E0130 00:11:55.927123 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.4271092 +0000 UTC m=+120.441171669 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.028521 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.028772 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.528755057 +0000 UTC m=+120.542817516 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:56 crc kubenswrapper[5119]: W0130 00:11:56.107763 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf863fff9_286a_45fa_b8f0_8a86994b8440.slice/crio-952386e4dc30aec9c1b426b6e4988b900976605329c9a27c5617e1121c8dcff9 WatchSource:0}: Error finding container 952386e4dc30aec9c1b426b6e4988b900976605329c9a27c5617e1121c8dcff9: Status 404 returned error can't find the container with id 952386e4dc30aec9c1b426b6e4988b900976605329c9a27c5617e1121c8dcff9 Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.131517 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.631503451 +0000 UTC m=+120.645565910 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.130283 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.138127 5119 patch_prober.go:28] interesting pod/marketplace-operator-547dbd544d-xmw98 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.138188 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" Jan 30 00:11:56 crc kubenswrapper[5119]: W0130 00:11:56.156738 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a9ae5f6_97bd_46ac_bafa_ca1b4452a141.slice/crio-33ea1d5c7c00e68666a10aacadda1920b36cdb0dda18d2d10bf84e5587096923 WatchSource:0}: Error finding container 33ea1d5c7c00e68666a10aacadda1920b36cdb0dda18d2d10bf84e5587096923: Status 404 returned error can't find the container with id 33ea1d5c7c00e68666a10aacadda1920b36cdb0dda18d2d10bf84e5587096923 Jan 30 00:11:56 crc kubenswrapper[5119]: W0130 00:11:56.169223 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64ebdc45_679c_4414_84fa_805ed5d07898.slice/crio-33c09db520e17028f87f48fa20ef4194a80a00898fd7770eff822a6df6932361 WatchSource:0}: Error finding container 33c09db520e17028f87f48fa20ef4194a80a00898fd7770eff822a6df6932361: Status 404 returned error can't find the container with id 33c09db520e17028f87f48fa20ef4194a80a00898fd7770eff822a6df6932361 Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.233245 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.233543 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.733525777 +0000 UTC m=+120.747588236 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.334615 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.334931 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.834916508 +0000 UTC m=+120.848978967 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.407583 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-11-crc"] Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.407627 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7" event={"ID":"c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb","Type":"ContainerStarted","Data":"19abbc25d3abeb2e16309501dc466897ef2ff158d46622ec1acdf9682ba4d3a8"} Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.407741 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4" Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.407770 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-8gjq7"] Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.407815 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.410751 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.412338 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver\"/\"installer-sa-dockercfg-bqqnb\"" Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.435446 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.435603 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.935580172 +0000 UTC m=+120.949642631 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.435842 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.436188 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:56.936175506 +0000 UTC m=+120.950237955 (durationBeforeRetry 500ms). 
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.534318 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:11:56 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:11:56 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:11:56 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.534405 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.536967 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.537189 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.037161838 +0000 UTC m=+121.051224317 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.537303 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.537372 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/391c4453-ed09-4305-a808-ede0ea232f99-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.537419 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/391c4453-ed09-4305-a808-ede0ea232f99-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.537757 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.037746052 +0000 UTC m=+121.051808531 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.638197 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.638683 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/391c4453-ed09-4305-a808-ede0ea232f99-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.638725 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/391c4453-ed09-4305-a808-ede0ea232f99-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.638896 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/391c4453-ed09-4305-a808-ede0ea232f99-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.643269 5119 patch_prober.go:28] interesting pod/openshift-config-operator-5777786469-7wrgd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body=
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.643329 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" podUID="c423b388-bcb0-40a3-9889-9ec109779849" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.643604 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.643652 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" event={"ID":"f863fff9-286a-45fa-b8f0-8a86994b8440","Type":"ContainerStarted","Data":"952386e4dc30aec9c1b426b6e4988b900976605329c9a27c5617e1121c8dcff9"}
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.645110 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.145049725 +0000 UTC m=+121.159112194 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.646582 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" event={"ID":"cb5e6bb2-b3b7-43b9-aae8-2385735815cd","Type":"ContainerStarted","Data":"4c6b984f851995d9d6c5b7c89ff18568cb802d229c79cbf8a461cf2365520738"}
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.651081 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-8gjq7" event={"ID":"64ebdc45-679c-4414-84fa-805ed5d07898","Type":"ContainerStarted","Data":"33c09db520e17028f87f48fa20ef4194a80a00898fd7770eff822a6df6932361"}
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.653605 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" event={"ID":"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141","Type":"ContainerStarted","Data":"33ea1d5c7c00e68666a10aacadda1920b36cdb0dda18d2d10bf84e5587096923"}
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.661641 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" event={"ID":"17b87002-b798-480a-8e17-83053d698239","Type":"ContainerStarted","Data":"52b236f8d3560ade30ace7ead54269311f14d96ef83cb57bd0621984d5dbf30a"}
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.668646 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver\"/\"kube-root-ca.crt\""
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.677839 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/391c4453-ed09-4305-a808-ede0ea232f99-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.727564 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver\"/\"installer-sa-dockercfg-bqqnb\""
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.736231 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc"
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.739650 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.740053 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.240036242 +0000 UTC m=+121.254098701 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.841064 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.841200 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.341170397 +0000 UTC m=+121.355232856 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.841588 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.842002 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.341985786 +0000 UTC m=+121.356048265 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.942673 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.942875 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.442844695 +0000 UTC m=+121.456907164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:56 crc kubenswrapper[5119]: I0130 00:11:56.943345 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:56 crc kubenswrapper[5119]: E0130 00:11:56.943720 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.443692305 +0000 UTC m=+121.457754764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.055029 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.055246 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.55521978 +0000 UTC m=+121.569282249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.055360 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.055785 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.555767923 +0000 UTC m=+121.569830382 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.106337 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.106692 5119 patch_prober.go:28] interesting pod/marketplace-operator-547dbd544d-xmw98 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.106773 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused"
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.155984 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.156108 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.656089729 +0000 UTC m=+121.670152188 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.156675 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.156972 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.65696209 +0000 UTC m=+121.671024549 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.184791 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-11-crc"]
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.258363 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.258754 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.75873528 +0000 UTC m=+121.772797739 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.359868 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.360162 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.860148162 +0000 UTC m=+121.874210631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.461316 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.461593 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:57.961577714 +0000 UTC m=+121.975640173 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.533356 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:11:57 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:11:57 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:11:57 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.533446 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.562755 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.563125 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.063109348 +0000 UTC m=+122.077171807 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.663734 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.663885 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.163854924 +0000 UTC m=+122.177917393 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.664181 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.664376 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"391c4453-ed09-4305-a808-ede0ea232f99","Type":"ContainerStarted","Data":"3e6594ef8413e7553a920ecd84ff7482093f33d22c770392a03802e6acb6bef7"}
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.664551 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.16453354 +0000 UTC m=+122.178595999 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.666026 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-74545575db-twjcj" event={"ID":"db680431-c513-449a-a2d0-2df450dbbb9c","Type":"ContainerStarted","Data":"7a8e81279e21392f9cd3a058f10e319ea9be0fa766b9f29c25e1c60a6acbc33d"}
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.764747 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.764888 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.264863115 +0000 UTC m=+122.278925574 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.765212 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.765532 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.265520671 +0000 UTC m=+122.279583130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.866215 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.866373 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.366344738 +0000 UTC m=+122.380407197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.866761 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.867038 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.367023854 +0000 UTC m=+122.381086303 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.968381 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.968494 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.468472067 +0000 UTC m=+122.482534546 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:57 crc kubenswrapper[5119]: I0130 00:11:57.968682 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:57 crc kubenswrapper[5119]: E0130 00:11:57.968981 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.468973419 +0000 UTC m=+122.483035878 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.069182 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.069369 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.569344335 +0000 UTC m=+122.583406794 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.069513 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.069845 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.569833017 +0000 UTC m=+122.583895466 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.170806 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.171088 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.671051864 +0000 UTC m=+122.685114343 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.272142 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.272505 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.772488556 +0000 UTC m=+122.786551015 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.373787 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.373919 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.873893228 +0000 UTC m=+122.887955687 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.374230 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.374600 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.874587424 +0000 UTC m=+122.888649883 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.475353 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.475535 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.975511084 +0000 UTC m=+122.989573543 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.475667 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.476059 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:58.976042637 +0000 UTC m=+122.990105116 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.533453 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:11:58 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:11:58 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:11:58 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.533524 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.577121 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.577282 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.077257344 +0000 UTC m=+123.091319803 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.577368 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.577659 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.077647033 +0000 UTC m=+123.091709492 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.630043 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.631242 5119 patch_prober.go:28] interesting pod/packageserver-7d4fc7d867-tgxk7 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:5443/healthz\": dial tcp 10.217.0.37:5443: connect: connection refused" start-of-body=
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.631304 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7" podUID="c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.37:5443/healthz\": dial tcp 10.217.0.37:5443: connect: connection refused"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.633804 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-c9qgc"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.636149 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-8596bd845d-l5nr4"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.642332 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.666286 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7" podStartSLOduration=101.666267967 podStartE2EDuration="1m41.666267967s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:58.648627842 +0000 UTC m=+122.662690311" watchObservedRunningTime="2026-01-30 00:11:58.666267967 +0000 UTC m=+122.680330426"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.678722 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.679592 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.179559967 +0000 UTC m=+123.193622436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.708126 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-rlmp7" podStartSLOduration=101.708103274 podStartE2EDuration="1m41.708103274s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:58.705690296 +0000 UTC m=+122.719752775" watchObservedRunningTime="2026-01-30 00:11:58.708103274 +0000 UTC m=+122.722165733" Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.733312 5119 patch_prober.go:28] interesting pod/openshift-config-operator-5777786469-7wrgd container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.733376 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" podUID="c423b388-bcb0-40a3-9889-9ec109779849" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.780635 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.781047 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.28103321 +0000 UTC m=+123.295095679 (durationBeforeRetry 500ms). 
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.781047 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.28103321 +0000 UTC m=+123.295095679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.857896 5119 ???:1] "http: TLS handshake error from 192.168.126.11:48036: no serving certificate available for the kubelet"
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.881725 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.881883 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.381856048 +0000 UTC m=+123.395918507 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:58 crc kubenswrapper[5119]: I0130 00:11:58.882300 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
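[Note: every MountDevice/TearDownAt failure above has the same root cause: the kubelet's plugin registry has no entry for kubevirt.io.hostpath-provisioner, so it cannot build a CSI driver client. Registration happens when the driver's node plugin drops a socket into the kubelet's plugins_registry directory; at cluster level, "kubectl get csidriver" shows the CSIDriver objects, but node-local registration is what this error is about. A small Go sketch that lists what is currently registered on the node; the directory is the kubelet default, and the -reg.sock name in the comment is an assumption:]

package main

import (
	"fmt"
	"os"
)

// List the plugin registration sockets the kubelet watches. Until an entry for
// kubevirt.io.hostpath-provisioner appears here, every mount/unmount attempt in
// this log keeps failing with "not found in the list of registered CSI drivers".
func main() {
	entries, err := os.ReadDir("/var/lib/kubelet/plugins_registry")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, e := range entries {
		fmt.Println(e.Name()) // expect e.g. kubevirt.io.hostpath-provisioner-reg.sock
	}
}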
Jan 30 00:11:58 crc kubenswrapper[5119]: E0130 00:11:58.882665 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.382652637 +0000 UTC m=+123.396715096 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
[00:11:58.983 - 00:11:59.493: the same UnmountVolume.TearDown (pod 9e9b5059-1b3e-4067-a63d-2952cbe863af) and MountVolume.MountDevice (pod image-registry-66587d64c8-c5xkv) pair is requeued every 500 ms, each attempt failing with the identical "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" error; the repeated records are elided.]
Jan 30 00:11:59 crc kubenswrapper[5119]: E0130 00:11:59.493685 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:59.993675718 +0000 UTC m=+124.007738177 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
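[Note: the cadence in this stretch is fixed: nestedpendingoperations requeues each failed volume operation with durationBeforeRetry 500ms, and each retry fails immediately for the same reason, so the pair recurs roughly twice per second. An illustrative Go loop with the same shape, polling for the driver's registration socket (socket name assumed, as above); the real kubelet logic lives in nestedpendingoperations.go and the volume reconciler:]

package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

// Illustrative only: retry at the 500 ms cadence seen in the log until the
// CSI driver registers, then stop. The kubelet does this per volume operation.
func main() {
	const sock = "/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" // assumed name
	for deadline := time.Now().Add(30 * time.Second); time.Now().Before(deadline); {
		_, err := os.Stat(sock)
		if err == nil {
			fmt.Println("driver registered; mount can proceed")
			return
		}
		if !errors.Is(err, os.ErrNotExist) {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		time.Sleep(500 * time.Millisecond) // durationBeforeRetry from the log
	}
	fmt.Println("driver still unregistered after 30s")
}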
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.533828 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:11:59 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:11:59 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:11:59 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.533915 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.558046 5119 patch_prober.go:28] interesting pod/openshift-config-operator-5777786469-7wrgd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body=
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.558366 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd" podUID="c423b388-bcb0-40a3-9889-9ec109779849" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused"
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.594722 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:59 crc kubenswrapper[5119]: E0130 00:11:59.595703 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.095683114 +0000 UTC m=+124.109745573 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
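[Note: the prober records above are plain HTTP GETs issued by the kubelet against the pod IP; "connection refused" means nothing is listening on the port yet, while the router's statuscode 500 carries the healthz sub-check breakdown in the body. A Go sketch that replays the config-operator readiness check; the endpoint is copied from the record, and certificate verification is skipped because kubelet HTTPS probes do not verify the serving certificate:]

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// Replay the readiness probe from the records above:
// GET https://10.217.0.26:8443/healthz with certificate verification disabled.
func main() {
	client := &http.Client{
		Timeout: time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://10.217.0.26:8443/healthz")
	if err != nil {
		fmt.Println("probe failed:", err) // e.g. connect: connection refused
		return
	}
	defer resp.Body.Close()
	fmt.Println("probe status:", resp.StatusCode) // 200 once the operator is serving
}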
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.679755 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj" event={"ID":"4ffa4428-a504-40e8-9e04-5b8547bd6875","Type":"ContainerStarted","Data":"f2e6645e2cc0246376f4fee7222d84f10e9fe3d18ea54dfed144b627bf4af027"}
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.696669 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:11:59 crc kubenswrapper[5119]: E0130 00:11:59.697164 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.197146757 +0000 UTC m=+124.211209216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:59 crc kubenswrapper[5119]: I0130 00:11:59.798995 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:59 crc kubenswrapper[5119]: E0130 00:11:59.799296 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.299273955 +0000 UTC m=+124.313336414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
[00:11:59.900 - 00:12:00.208: the MountVolume.MountDevice / UnmountVolume.TearDown retry pair for pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 continues at the same 500 ms cadence with the identical "driver name kubevirt.io.hostpath-provisioner not found" failure; the repeated records are elided.]
Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.207816 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.707779781 +0000 UTC m=+124.721842260 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.207958 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.208573 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.708557509 +0000 UTC m=+124.722619988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.291229 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.291990 5119 patch_prober.go:28] interesting pod/packageserver-7d4fc7d867-tgxk7 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:5443/healthz\": dial tcp 10.217.0.37:5443: connect: connection refused" start-of-body= Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.292071 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7" podUID="c4ad95fd-bcbf-4e24-8df4-dae7f9b018fb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.37:5443/healthz\": dial tcp 10.217.0.37:5443: connect: connection refused" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.292227 5119 patch_prober.go:28] interesting pod/olm-operator-5cdf44d969-bt8gg container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.292273 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" podUID="cb5e6bb2-b3b7-43b9-aae8-2385735815cd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.292501 5119 kubelet.go:2658] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.308964 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.309128 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.80909926 +0000 UTC m=+124.823161719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.309661 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.310153 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.810133845 +0000 UTC m=+124.824196304 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.310752 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-74545575db-twjcj" podStartSLOduration=103.310733809 podStartE2EDuration="1m43.310733809s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:00.310023612 +0000 UTC m=+124.324086061" watchObservedRunningTime="2026-01-30 00:12:00.310733809 +0000 UTC m=+124.324796268" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.331200 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2jg89"] Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.335299 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" podStartSLOduration=103.33527208 podStartE2EDuration="1m43.33527208s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:00.332318649 +0000 UTC m=+124.346381118" watchObservedRunningTime="2026-01-30 00:12:00.33527208 +0000 UTC m=+124.349334539" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.411593 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.413002 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:00.912963551 +0000 UTC m=+124.927026010 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.485939 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2jg89"] Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.486100 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.492023 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-7cl8d\"" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.513663 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-utilities\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.513705 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-catalog-content\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.513761 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.513802 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbpkl\" (UniqueName: \"kubernetes.io/projected/c0a462d2-1b1f-47e8-9c33-1700f405a90d-kube-api-access-zbpkl\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.514115 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.014102596 +0000 UTC m=+125.028165055 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.532338 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:12:00 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:12:00 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:12:00 crc kubenswrapper[5119]: healthz check failed Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.532452 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.532633 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jqrl2"] Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.564002 5119 patch_prober.go:28] interesting pod/console-64d44f6ddf-fjtqb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.564083 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-console/console-64d44f6ddf-fjtqb" podUID="80f4b56b-65a5-40d2-9a12-0465c0ff492c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.614303 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.614508 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.114479572 +0000 UTC m=+125.128542031 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.614742 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zbpkl\" (UniqueName: \"kubernetes.io/projected/c0a462d2-1b1f-47e8-9c33-1700f405a90d-kube-api-access-zbpkl\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.614881 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-utilities\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.614919 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-catalog-content\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.615046 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.615368 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.115362204 +0000 UTC m=+125.129424653 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.615804 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-utilities\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.615833 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-catalog-content\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.620975 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jqrl2"] Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.621111 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.623478 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"community-operators-dockercfg-vrd5f\"" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.641933 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbpkl\" (UniqueName: \"kubernetes.io/projected/c0a462d2-1b1f-47e8-9c33-1700f405a90d-kube-api-access-zbpkl\") pod \"certified-operators-2jg89\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.725950 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.726632 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-catalog-content\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.726664 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-utilities\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.726701 5119 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwcnq\" (UniqueName: \"kubernetes.io/projected/88f818c0-b63a-4707-a2ab-49c0e9c58544-kube-api-access-vwcnq\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.726840 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.226813287 +0000 UTC m=+125.240875736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.728367 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z9zsl"] Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.828276 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vwcnq\" (UniqueName: \"kubernetes.io/projected/88f818c0-b63a-4707-a2ab-49c0e9c58544-kube-api-access-vwcnq\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.828351 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.828402 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-catalog-content\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.828425 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-utilities\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.828775 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.328754531 +0000 UTC m=+125.342816990 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.828797 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-utilities\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.828978 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-catalog-content\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.849060 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.849671 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwcnq\" (UniqueName: \"kubernetes.io/projected/88f818c0-b63a-4707-a2ab-49c0e9c58544-kube-api-access-vwcnq\") pod \"community-operators-jqrl2\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.931682 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.931871 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.431840993 +0000 UTC m=+125.445903452 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.932438 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:00 crc kubenswrapper[5119]: E0130 00:12:00.932733 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.432718444 +0000 UTC m=+125.446780893 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.981066 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:00 crc kubenswrapper[5119]: I0130 00:12:00.988973 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jqrl2"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.006838 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9zsl"]
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.006885 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-29jhj"]
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.035097 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.035459 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-catalog-content\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.035497 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q82v\" (UniqueName: \"kubernetes.io/projected/868949ba-fadb-4cc1-88d4-503a4049f66d-kube-api-access-8q82v\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.035563 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-utilities\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.035723 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.535696864 +0000 UTC m=+125.549759323 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.113154 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" podStartSLOduration=105.113136518 podStartE2EDuration="1m45.113136518s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:01.111111669 +0000 UTC m=+125.125174128" watchObservedRunningTime="2026-01-30 00:12:01.113136518 +0000 UTC m=+125.127198977"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.138171 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-catalog-content\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.138221 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8q82v\" (UniqueName: \"kubernetes.io/projected/868949ba-fadb-4cc1-88d4-503a4049f66d-kube-api-access-8q82v\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.138342 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.138370 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-utilities\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.140130 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.640117178 +0000 UTC m=+125.654179637 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.140199 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-catalog-content\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.142699 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-utilities\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.168884 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q82v\" (UniqueName: \"kubernetes.io/projected/868949ba-fadb-4cc1-88d4-503a4049f66d-kube-api-access-8q82v\") pod \"certified-operators-z9zsl\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") " pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.180227 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-6p645" event={"ID":"2888bde6-bdde-4277-b478-2557c52cd1e2","Type":"ContainerStarted","Data":"fb481149acc408acf8fef7b0188be9bf311d438c655447da59570b45781eb31c"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.180294 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" event={"ID":"1375f37f-3c35-4d19-ade4-559ffe4d22aa","Type":"ContainerStarted","Data":"82f19502839959b36eec243b92046cda2f55fd0c929e1eed459d1b986eae0312"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.180313 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29jhj"]
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.180342 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2jg89"]
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.180505 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.240081 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.240253 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-utilities\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.242904 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.742882432 +0000 UTC m=+125.756944891 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.242952 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-catalog-content\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.242994 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jnjj\" (UniqueName: \"kubernetes.io/projected/f304f5b4-43b0-460e-b28c-f2af396db9a1-kube-api-access-8jnjj\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.243113 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.243458 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.743446565 +0000 UTC m=+125.757509024 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.281780 5119 patch_prober.go:28] interesting pod/olm-operator-5cdf44d969-bt8gg container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body=
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.281852 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg" podUID="cb5e6bb2-b3b7-43b9-aae8-2385735815cd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.301633 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-799b87ffcd-bzgsm" podStartSLOduration=105.301614655 podStartE2EDuration="1m45.301614655s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:01.301170434 +0000 UTC m=+125.315232903" watchObservedRunningTime="2026-01-30 00:12:01.301614655 +0000 UTC m=+125.315677114"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.322226 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.344012 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.344196 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-utilities\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.344239 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-catalog-content\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.344265 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8jnjj\" (UniqueName: \"kubernetes.io/projected/f304f5b4-43b0-460e-b28c-f2af396db9a1-kube-api-access-8jnjj\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.345152 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.845133493 +0000 UTC m=+125.859195952 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.345647 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-utilities\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.346285 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-catalog-content\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.366981 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jnjj\" (UniqueName: \"kubernetes.io/projected/f304f5b4-43b0-460e-b28c-f2af396db9a1-kube-api-access-8jnjj\") pod \"community-operators-29jhj\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.445329 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.445786 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:01.945771596 +0000 UTC m=+125.959834055 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.471565 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.471622 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.501619 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jqrl2"]
Jan 30 00:12:01 crc kubenswrapper[5119]: W0130 00:12:01.514250 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88f818c0_b63a_4707_a2ab_49c0e9c58544.slice/crio-37050a125359e6253497ce3c02c472beafc88140ca48a22fac629dfba78ace59 WatchSource:0}: Error finding container 37050a125359e6253497ce3c02c472beafc88140ca48a22fac629dfba78ace59: Status 404 returned error can't find the container with id 37050a125359e6253497ce3c02c472beafc88140ca48a22fac629dfba78ace59
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.526277 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29jhj"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.533043 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:12:01 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:12:01 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:12:01 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.533096 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.549220 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.549288 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.049261557 +0000 UTC m=+126.063324006 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.550285 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.550662 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.050648191 +0000 UTC m=+126.064710690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.651163 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.651558 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.15154123 +0000 UTC m=+126.165603689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.721027 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9zsl"]
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.753665 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.754359 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.254340955 +0000 UTC m=+126.268403404 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.784447 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-72l9z" event={"ID":"a5f0c418-1e6f-42c7-b846-448cd019dcbf","Type":"ContainerStarted","Data":"68a60828c037a8228c1ce004bb04ad617a110b90d362f3d6dc45678502e9787f"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.795002 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-tqjvr" event={"ID":"dc671644-a8ef-4980-b505-388484e4645a","Type":"ContainerStarted","Data":"c1c13725c010faad4fa7fddbd91f5d00c4dfd9588a82fdaa9a58b6eb87bf645c"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.797197 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerStarted","Data":"07a0629827bad9428219265f2d8638f06de8784946e24cf5a8fd1bb658527aa7"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.804243 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerStarted","Data":"0448985a2c6ebcbc7c59cfc3dd291703bbc70a751a7e1cbe1e7f96022797737e"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.806047 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-w8qxl" event={"ID":"b82c2f8b-0956-4050-b980-2973d235692b","Type":"ContainerStarted","Data":"09f0dddbd662818ebfe5530b97b932541064158b7bf7763f7541a4c7bebf8d36"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.829893 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.831442 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"b34cbd9138535d7259b96939a704686f8e02efc79a8714278adb31a3c06fcd23"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.833750 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" event={"ID":"4e4d65fd-a484-4711-a91f-cd04e6dfa00a","Type":"ContainerStarted","Data":"ffdc793d87c79423479f13237c7c18cc48541d7ffc7e23b97d6131d872a5dcf8"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.838524 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-72l9z" podStartSLOduration=22.838507051 podStartE2EDuration="22.838507051s" podCreationTimestamp="2026-01-30 00:11:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:01.837894616 +0000 UTC m=+125.851957075" watchObservedRunningTime="2026-01-30 00:12:01.838507051 +0000 UTC m=+125.852569510"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.854287 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-tqjvr" podStartSLOduration=22.854269171 podStartE2EDuration="22.854269171s" podCreationTimestamp="2026-01-30 00:11:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:01.853690047 +0000 UTC m=+125.867752506" watchObservedRunningTime="2026-01-30 00:12:01.854269171 +0000 UTC m=+125.868331630"
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.862521 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.862711 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" event={"ID":"32f760a1-1874-479d-810f-ac7785c7b94d","Type":"ContainerStarted","Data":"63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0"}
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.862779 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.362750405 +0000 UTC m=+126.376812864 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.862923 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.863304 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.363297568 +0000 UTC m=+126.377360017 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.863594 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerStarted","Data":"37050a125359e6253497ce3c02c472beafc88140ca48a22fac629dfba78ace59"}
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.964554 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.964687 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.464660518 +0000 UTC m=+126.478722977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:01 crc kubenswrapper[5119]: I0130 00:12:01.964952 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:01 crc kubenswrapper[5119]: E0130 00:12:01.965233 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.465220022 +0000 UTC m=+126.479282481 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.007170 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29jhj"]
Jan 30 00:12:02 crc kubenswrapper[5119]: W0130 00:12:02.012322 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf304f5b4_43b0_460e_b28c_f2af396db9a1.slice/crio-f1863e2f0ab0fc798af09d6b0fcce0062b74c51096b456624e38b207599a2da8 WatchSource:0}: Error finding container f1863e2f0ab0fc798af09d6b0fcce0062b74c51096b456624e38b207599a2da8: Status 404 returned error can't find the container with id f1863e2f0ab0fc798af09d6b0fcce0062b74c51096b456624e38b207599a2da8
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.066603 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.066800 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.566772997 +0000 UTC m=+126.580835456 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.066954 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.067255 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.567242188 +0000 UTC m=+126.581304647 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.123107 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.123150 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.167831 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.168043 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.667992904 +0000 UTC m=+126.682055383 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.168297 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.168621 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.668606559 +0000 UTC m=+126.682669018 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.227806 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.227849 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.290327 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.290553 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" podStartSLOduration=23.290535924 podStartE2EDuration="23.290535924s" podCreationTimestamp="2026-01-30 00:11:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:02.24550558 +0000 UTC m=+126.259568039" watchObservedRunningTime="2026-01-30 00:12:02.290535924 +0000 UTC m=+126.304598383"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.290736 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.790712408 +0000 UTC m=+126.804774867 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.290976 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.292899 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.79287034 +0000 UTC m=+126.806932819 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.298566 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" podStartSLOduration=106.298539077 podStartE2EDuration="1m46.298539077s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:02.295494444 +0000 UTC m=+126.309556903" watchObservedRunningTime="2026-01-30 00:12:02.298539077 +0000 UTC m=+126.312601536"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.308851 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.333279 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xrxfc"]
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.354065 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=39.354047613 podStartE2EDuration="39.354047613s" podCreationTimestamp="2026-01-30 00:11:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:02.349991596 +0000 UTC m=+126.364054045" watchObservedRunningTime="2026-01-30 00:12:02.354047613 +0000 UTC m=+126.368110072"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.393485 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.393748 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.893722019 +0000 UTC m=+126.907784468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.496134 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.496465 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:02.996449692 +0000 UTC m=+127.010512151 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.533247 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:12:02 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:12:02 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:12:02 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.533328 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.597103 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.597430 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.097372342 +0000 UTC m=+127.111434801 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.699322 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.699662 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.199649164 +0000 UTC m=+127.213711623 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.801688 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.801889 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.301864095 +0000 UTC m=+127.315926554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.802433 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.802728 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.302721026 +0000 UTC m=+127.316783485 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.846018 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xrxfc"]
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.846425 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.850960 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-marketplace-dockercfg-gg4w7\""
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.877887 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xmlxg"]
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.904040 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.904497 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-utilities\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.904538 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-catalog-content\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:02 crc kubenswrapper[5119]: I0130 00:12:02.904571 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dj8j\" (UniqueName: \"kubernetes.io/projected/b956d3a0-19db-47f7-bc95-b3371c1e7968-kube-api-access-7dj8j\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:02 crc kubenswrapper[5119]: E0130 00:12:02.904749 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.404685071 +0000 UTC m=+127.418747540 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.006244 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.006324 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-utilities\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.006539 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-catalog-content\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.006594 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7dj8j\" (UniqueName: \"kubernetes.io/projected/b956d3a0-19db-47f7-bc95-b3371c1e7968-kube-api-access-7dj8j\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.006924 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-utilities\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.006936 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.506907342 +0000 UTC m=+127.520969981 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.007325 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-catalog-content\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.030122 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dj8j\" (UniqueName: \"kubernetes.io/projected/b956d3a0-19db-47f7-bc95-b3371c1e7968-kube-api-access-7dj8j\") pod \"redhat-marketplace-xrxfc\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " pod="openshift-marketplace/redhat-marketplace-xrxfc"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047241 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmlxg"]
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047290 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn" event={"ID":"f6110d7d-2373-491a-89f6-6e2068b759fe","Type":"ContainerStarted","Data":"c4cae2f798ec2e591f38687551cb92845604fd5b213ea25be7bfdc3d76609e37"}
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047315 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-lkcvr"]
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047338 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" event={"ID":"04691874-6d27-40cd-a0c5-b5b3075a3327","Type":"ContainerStarted","Data":"b99bc036373e53ee5d8d74c73b5f7afc276413a5e1470c222ce051025fa3ac11"}
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047908 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-5777786469-7wrgd"
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047932 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerStarted","Data":"f1863e2f0ab0fc798af09d6b0fcce0062b74c51096b456624e38b207599a2da8"}
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047967 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"516d6458-9038-4211-bc80-9bb37fc669a1","Type":"ContainerStarted","Data":"c6a55595cad6f49ade38e5c591a23097622baae047f07428867a4f1c7cff2962"}
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.047981 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" event={"ID":"790922bf-8215-4d59-a46e-9035f3be8e32","Type":"ContainerStarted","Data":"ab61624c6fd5f833d8151a412b5c79f44dfdeab0d0277899f57fc6e128b686f5"}
Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.048347
5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.115533 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.115951 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.615914536 +0000 UTC m=+127.629976995 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.116406 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dfwj\" (UniqueName: \"kubernetes.io/projected/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-kube-api-access-9dfwj\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.116526 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.116755 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-utilities\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.116829 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-catalog-content\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.117354 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.61733687 +0000 UTC m=+127.631399329 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.166048 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.218568 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.218878 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.718847214 +0000 UTC m=+127.732909673 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.219130 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-utilities\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.219169 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-catalog-content\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.219573 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9dfwj\" (UniqueName: \"kubernetes.io/projected/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-kube-api-access-9dfwj\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.219692 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-catalog-content\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.219728 5119 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.219773 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-utilities\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.220298 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.720277389 +0000 UTC m=+127.734339848 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.250786 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dfwj\" (UniqueName: \"kubernetes.io/projected/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-kube-api-access-9dfwj\") pod \"redhat-marketplace-xmlxg\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.321272 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.321698 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.8216759 +0000 UTC m=+127.835738359 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.373642 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.383595 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.402793 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" podStartSLOduration=106.402773792 podStartE2EDuration="1m46.402773792s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:03.401123312 +0000 UTC m=+127.415185781" watchObservedRunningTime="2026-01-30 00:12:03.402773792 +0000 UTC m=+127.416836251" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.421586 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-4trwn" podStartSLOduration=106.421569735 podStartE2EDuration="1m46.421569735s" podCreationTimestamp="2026-01-30 00:10:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:03.417432905 +0000 UTC m=+127.431495384" watchObservedRunningTime="2026-01-30 00:12:03.421569735 +0000 UTC m=+127.435632194" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.422563 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.422896 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:03.922882746 +0000 UTC m=+127.936945205 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.491986 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-2jnpf" podStartSLOduration=107.49197086 podStartE2EDuration="1m47.49197086s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:03.488582208 +0000 UTC m=+127.502644667" watchObservedRunningTime="2026-01-30 00:12:03.49197086 +0000 UTC m=+127.506033319" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.523360 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.525026 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.024953574 +0000 UTC m=+128.039016053 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.534474 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-889qp"] Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.535772 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:12:03 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:12:03 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:12:03 crc kubenswrapper[5119]: healthz check failed Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.536500 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.600407 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-889qp"] Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.600452 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xrxfc"] Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.600598 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.604375 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-9gxlh\"" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.625850 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.626382 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.126361746 +0000 UTC m=+128.140424205 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: W0130 00:12:03.641706 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb956d3a0_19db_47f7_bc95_b3371c1e7968.slice/crio-f7f9596f1d8fbbf965e4c760eabcc7200bc168e3f8bd9acc750c7f481ce301c1 WatchSource:0}: Error finding container f7f9596f1d8fbbf965e4c760eabcc7200bc168e3f8bd9acc750c7f481ce301c1: Status 404 returned error can't find the container with id f7f9596f1d8fbbf965e4c760eabcc7200bc168e3f8bd9acc750c7f481ce301c1 Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.727976 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.728176 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjhv6\" (UniqueName: \"kubernetes.io/projected/b5189257-f48b-458c-a523-2c1d73cd3f63-kube-api-access-zjhv6\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.728257 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-utilities\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.728288 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-catalog-content\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.728433 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.228382452 +0000 UTC m=+128.242444911 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.730273 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmlxg"] Jan 30 00:12:03 crc kubenswrapper[5119]: W0130 00:12:03.796691 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1343c5b_f89c_4044_9a5a_36db14bb5ed9.slice/crio-4860794945e1db9efa4e7ff3ddc490b199845db3d60b0e11e1101786e63a48c3 WatchSource:0}: Error finding container 4860794945e1db9efa4e7ff3ddc490b199845db3d60b0e11e1101786e63a48c3: Status 404 returned error can't find the container with id 4860794945e1db9efa4e7ff3ddc490b199845db3d60b0e11e1101786e63a48c3 Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.829683 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-utilities\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.829733 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-catalog-content\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.829772 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zjhv6\" (UniqueName: \"kubernetes.io/projected/b5189257-f48b-458c-a523-2c1d73cd3f63-kube-api-access-zjhv6\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.829818 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.830090 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.33007793 +0000 UTC m=+128.344140379 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.830568 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-utilities\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.830778 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-catalog-content\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.855166 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjhv6\" (UniqueName: \"kubernetes.io/projected/b5189257-f48b-458c-a523-2c1d73cd3f63-kube-api-access-zjhv6\") pod \"redhat-operators-889qp\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.918955 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vf8tm"] Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.930732 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.930853 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.430822736 +0000 UTC m=+128.444885205 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.931352 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:03 crc kubenswrapper[5119]: E0130 00:12:03.931775 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.431746138 +0000 UTC m=+128.445808607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.955691 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vf8tm"] Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.955848 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.964084 5119 generic.go:358] "Generic (PLEG): container finished" podID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerID="81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6" exitCode=0 Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.964160 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerDied","Data":"81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6"} Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.966971 5119 generic.go:358] "Generic (PLEG): container finished" podID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerID="865a96337a7d93d869b72c4e68ee626bfc37e130ff1192f6fdd9ea9fee137e83" exitCode=0 Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.967034 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerDied","Data":"865a96337a7d93d869b72c4e68ee626bfc37e130ff1192f6fdd9ea9fee137e83"} Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.971171 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerStarted","Data":"4860794945e1db9efa4e7ff3ddc490b199845db3d60b0e11e1101786e63a48c3"} Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.974377 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-w8qxl" event={"ID":"b82c2f8b-0956-4050-b980-2973d235692b","Type":"ContainerStarted","Data":"b776ce9eb0213c79de81550e8b252a0c347a382482ea34107800b4021e52e58c"} Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.976078 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-dns/dns-default-w8qxl" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.981742 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" event={"ID":"17b87002-b798-480a-8e17-83053d698239","Type":"ContainerStarted","Data":"1e678287ebb7722bfb6fefe75bd5b1397ed6b5721dbae77863864623bbaca4d4"} Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.982476 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.988363 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerStarted","Data":"f7f9596f1d8fbbf965e4c760eabcc7200bc168e3f8bd9acc750c7f481ce301c1"} Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.989826 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.991688 5119 generic.go:358] "Generic (PLEG): container finished" podID="516d6458-9038-4211-bc80-9bb37fc669a1" containerID="c6a55595cad6f49ade38e5c591a23097622baae047f07428867a4f1c7cff2962" exitCode=0 Jan 30 00:12:03 crc kubenswrapper[5119]: I0130 00:12:03.991853 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"516d6458-9038-4211-bc80-9bb37fc669a1","Type":"ContainerDied","Data":"c6a55595cad6f49ade38e5c591a23097622baae047f07428867a4f1c7cff2962"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.001160 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" event={"ID":"f863fff9-286a-45fa-b8f0-8a86994b8440","Type":"ContainerStarted","Data":"f331920cb49f12c7ac1dbb32dc285849507660a81a67cfc49b91499714518872"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.014379 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj" event={"ID":"4ffa4428-a504-40e8-9e04-5b8547bd6875","Type":"ContainerStarted","Data":"c157291ffb0bf9d75253abab72e5d0309e327b19bf89089f8d0b8c0d29c7381f"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.017323 5119 generic.go:358] "Generic (PLEG): container finished" podID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerID="088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb" exitCode=0 Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.017404 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerDied","Data":"088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.018787 5119 generic.go:358] "Generic (PLEG): container finished" podID="391c4453-ed09-4305-a808-ede0ea232f99" containerID="d16f7908c067766c40bf950130af8c34b916a55dbb0db60d61a9b2043ed848d8" exitCode=0 Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.018819 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"391c4453-ed09-4305-a808-ede0ea232f99","Type":"ContainerDied","Data":"d16f7908c067766c40bf950130af8c34b916a55dbb0db60d61a9b2043ed848d8"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.023062 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-8gjq7" event={"ID":"64ebdc45-679c-4414-84fa-805ed5d07898","Type":"ContainerStarted","Data":"3e9127aa68adbb373d915856e5e6f853eff01970535abd0e443b8d60368a3f86"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.025355 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" event={"ID":"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141","Type":"ContainerStarted","Data":"57765690af10d9ef8171c9c7f33b34690f16b3f38c19be00225963e6def102aa"} Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.026739 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins" containerID="cri-o://63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" gracePeriod=30 Jan 30 
00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.032698 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.033008 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-catalog-content\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.033221 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-utilities\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.033315 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggsl8\" (UniqueName: \"kubernetes.io/projected/af9d2341-a591-42b6-91e5-8177924152b8-kube-api-access-ggsl8\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.033438 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.533409006 +0000 UTC m=+128.547471465 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.078112 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-w8qxl" podStartSLOduration=25.078090322 podStartE2EDuration="25.078090322s" podCreationTimestamp="2026-01-30 00:11:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:04.069903314 +0000 UTC m=+128.083965773" watchObservedRunningTime="2026-01-30 00:12:04.078090322 +0000 UTC m=+128.092152781" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.095637 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-69db94689b-xxmhj" podStartSLOduration=108.095618144 podStartE2EDuration="1m48.095618144s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:04.087202081 +0000 UTC m=+128.101264570" watchObservedRunningTime="2026-01-30 00:12:04.095618144 +0000 UTC m=+128.109680613" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.134106 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-utilities\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.134195 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-ggsl8\" (UniqueName: \"kubernetes.io/projected/af9d2341-a591-42b6-91e5-8177924152b8-kube-api-access-ggsl8\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.134281 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-catalog-content\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.134319 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.137890 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-utilities\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " 
pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.139378 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-catalog-content\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.139462 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.639443919 +0000 UTC m=+128.653506448 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.165746 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggsl8\" (UniqueName: \"kubernetes.io/projected/af9d2341-a591-42b6-91e5-8177924152b8-kube-api-access-ggsl8\") pod \"redhat-operators-vf8tm\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.237145 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.237327 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.737295255 +0000 UTC m=+128.751357734 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.237599 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.237909 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.737897779 +0000 UTC m=+128.751960238 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.295745 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.315308 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.332998 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.338162 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.338617 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.838600714 +0000 UTC m=+128.852663173 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.439779 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/516d6458-9038-4211-bc80-9bb37fc669a1-kubelet-dir\") pod \"516d6458-9038-4211-bc80-9bb37fc669a1\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.439850 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/516d6458-9038-4211-bc80-9bb37fc669a1-kube-api-access\") pod \"516d6458-9038-4211-bc80-9bb37fc669a1\" (UID: \"516d6458-9038-4211-bc80-9bb37fc669a1\") " Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.440035 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/516d6458-9038-4211-bc80-9bb37fc669a1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "516d6458-9038-4211-bc80-9bb37fc669a1" (UID: "516d6458-9038-4211-bc80-9bb37fc669a1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.440228 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.440487 5119 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/516d6458-9038-4211-bc80-9bb37fc669a1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.441090 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:04.941077361 +0000 UTC m=+128.955139820 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.448578 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/516d6458-9038-4211-bc80-9bb37fc669a1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "516d6458-9038-4211-bc80-9bb37fc669a1" (UID: "516d6458-9038-4211-bc80-9bb37fc669a1"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.538047 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:12:04 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:12:04 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:12:04 crc kubenswrapper[5119]: healthz check failed Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.538108 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.543444 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.543607 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.043569058 +0000 UTC m=+129.057631527 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.543950 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.544078 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/516d6458-9038-4211-bc80-9bb37fc669a1-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.544505 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.044384118 +0000 UTC m=+129.058446577 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.565704 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vf8tm"] Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.572944 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-889qp"] Jan 30 00:12:04 crc kubenswrapper[5119]: W0130 00:12:04.583329 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5189257_f48b_458c_a523_2c1d73cd3f63.slice/crio-4b6896bfc64e2f9d00c7528c683d1bbdb7580972278ecaf01de0268fab455564 WatchSource:0}: Error finding container 4b6896bfc64e2f9d00c7528c683d1bbdb7580972278ecaf01de0268fab455564: Status 404 returned error can't find the container with id 4b6896bfc64e2f9d00c7528c683d1bbdb7580972278ecaf01de0268fab455564 Jan 30 00:12:04 crc kubenswrapper[5119]: W0130 00:12:04.583610 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf9d2341_a591_42b6_91e5_8177924152b8.slice/crio-fd8adcfd8a3cd748066d7be919c84bb8da65c894e76de64001a1c4df00e4b901 WatchSource:0}: Error finding container fd8adcfd8a3cd748066d7be919c84bb8da65c894e76de64001a1c4df00e4b901: Status 404 returned error can't find the container with id fd8adcfd8a3cd748066d7be919c84bb8da65c894e76de64001a1c4df00e4b901 Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.645302 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.645526 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.145479202 +0000 UTC m=+129.159541661 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.646021 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.646374 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.146362933 +0000 UTC m=+129.160425452 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.747176 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.747417 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.247370055 +0000 UTC m=+129.261432514 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.747692 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.748105 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.248089193 +0000 UTC m=+129.262151652 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.849283 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.849598 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.349575046 +0000 UTC m=+129.363637515 (durationBeforeRetry 500ms). 
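
Each failure above is stamped with "No retries permitted until <t> (durationBeforeRetry 500ms)": the operation executor records the failure time and rejects the same volume operation until the delay has elapsed (the "m=+129.05..." suffix is Go's monotonic-clock reading printed alongside the wall-clock time). A minimal sketch of that gating pattern, not kubelet's actual nestedpendingoperations implementation:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // retryGate refuses to re-run an operation until a delay after its last failure.
    type retryGate struct {
    	lastFailure time.Time
    	delay       time.Duration
    }

    func (g *retryGate) run(op func() error) error {
    	if wait := time.Until(g.lastFailure.Add(g.delay)); wait > 0 {
    		return fmt.Errorf("no retries permitted for another %v", wait)
    	}
    	if err := op(); err != nil {
    		g.lastFailure = time.Now()
    		return err
    	}
    	return nil
    }

    func main() {
    	g := &retryGate{delay: 500 * time.Millisecond}
    	mount := func() error { return errors.New("driver not registered") }
    	for i := 0; i < 3; i++ {
    		fmt.Println(g.run(mount))
    		time.Sleep(200 * time.Millisecond) // caller re-attempts faster than the gate allows
    	}
    }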
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:04 crc kubenswrapper[5119]: I0130 00:12:04.951122 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:04 crc kubenswrapper[5119]: E0130 00:12:04.951569 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.45154863 +0000 UTC m=+129.465611089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.031714 5119 generic.go:358] "Generic (PLEG): container finished" podID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerID="807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba" exitCode=0 Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.031800 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerDied","Data":"807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.034380 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerStarted","Data":"4b6896bfc64e2f9d00c7528c683d1bbdb7580972278ecaf01de0268fab455564"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.035814 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" event={"ID":"6de3ce1c-697b-44eb-87b5-7365bab6606b","Type":"ContainerStarted","Data":"f60def7b05a1282bb38098dd3d0b62764c41c4e87676dfec70b1f88707b9d7b1"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.038054 5119 generic.go:358] "Generic (PLEG): container finished" podID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerID="1213d75c9903cd7e76bd5b5f15b35bf3c4f7cff8a224710a4abe224f1021aaa9" exitCode=0 Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.038143 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerDied","Data":"1213d75c9903cd7e76bd5b5f15b35bf3c4f7cff8a224710a4abe224f1021aaa9"} Jan 30 00:12:05 crc kubenswrapper[5119]: 
I0130 00:12:05.040922 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.041099 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"516d6458-9038-4211-bc80-9bb37fc669a1","Type":"ContainerDied","Data":"dba376e6c998e3b05be1d7e943d66e3001ebb24dedbc975693025d507e68ef51"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.041123 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dba376e6c998e3b05be1d7e943d66e3001ebb24dedbc975693025d507e68ef51" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.042257 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerStarted","Data":"fd8adcfd8a3cd748066d7be919c84bb8da65c894e76de64001a1c4df00e4b901"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.046352 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-8gjq7" event={"ID":"64ebdc45-679c-4414-84fa-805ed5d07898","Type":"ContainerStarted","Data":"f6de34a9bfd5aa51860295434be555e748e30ac2637c8a82af2ca33051a8f645"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.048586 5119 generic.go:358] "Generic (PLEG): container finished" podID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerID="f98b615a1458df8bff7d5837015ac4754a0a3e3b52e5dfc8dc054bb305ebb3e8" exitCode=0 Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.049578 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerDied","Data":"f98b615a1458df8bff7d5837015ac4754a0a3e3b52e5dfc8dc054bb305ebb3e8"} Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.051752 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.051973 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.551945297 +0000 UTC m=+129.566007766 (durationBeforeRetry 500ms). 
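
Interleaved with the volume errors, the Pod Lifecycle Event Generator (PLEG) relists containers and feeds the sync loop events like the ContainerStarted/ContainerDied pairs above; "Generic (PLEG): container finished ... exitCode=0" records a normal exit, here from the marketplace catalog pods' short-lived utility containers. A rough sketch of the event tuple those log lines serialize, with hypothetical type names:

    package main

    import "fmt"

    // plegEvent mirrors the {"ID":..,"Type":..,"Data":..} tuples in the log:
    // ID is the pod UID, Data the container (or sandbox) ID.
    type plegEvent struct {
    	ID   string
    	Type string // "ContainerStarted" | "ContainerDied" | ...
    	Data string
    }

    func handle(e plegEvent) {
    	switch e.Type {
    	case "ContainerStarted":
    		fmt.Printf("pod %s: container %s started\n", e.ID, e.Data)
    	case "ContainerDied":
    		// A died event triggers a pod sync; the exit code comes from runtime status.
    		fmt.Printf("pod %s: container %s died\n", e.ID, e.Data)
    	}
    }

    func main() {
    	handle(plegEvent{
    		ID:   "b956d3a0-19db-47f7-bc95-b3371c1e7968",
    		Type: "ContainerDied",
    		Data: "807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba",
    	})
    }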
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.052403 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.052902 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.5528919 +0000 UTC m=+129.566954369 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.140556 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-8gjq7" podStartSLOduration=109.14053909 podStartE2EDuration="1m49.14053909s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:05.138307086 +0000 UTC m=+129.152369555" watchObservedRunningTime="2026-01-30 00:12:05.14053909 +0000 UTC m=+129.154601549" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.155018 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.156022 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.655937351 +0000 UTC m=+129.669999800 (durationBeforeRetry 500ms). 
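
The podStartSLOduration line above is plain arithmetic over the timestamps it prints: network-metrics-daemon-8gjq7 was created at 00:10:16 and its running state was observed via watch at 00:12:05.14053909, so the tracker reports 109.14053909s ("1m49.14053909s"); the zero-value "0001-01-01" pulling timestamps mean no image pull was observed for this pod. Reproducing the log's own subtraction:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	created, _ := time.Parse(time.RFC3339, "2026-01-30T00:10:16Z")
    	running, _ := time.Parse(time.RFC3339Nano, "2026-01-30T00:12:05.14053909Z")
    	fmt.Println(running.Sub(created)) // 1m49.14053909s, i.e. podStartSLOduration=109.14053909
    }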
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.226545 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.256501 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.257931 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.757918466 +0000 UTC m=+129.771980925 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.350071 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.350418 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.356989 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.357154 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/391c4453-ed09-4305-a808-ede0ea232f99-kube-api-access\") pod \"391c4453-ed09-4305-a808-ede0ea232f99\" (UID: \"391c4453-ed09-4305-a808-ede0ea232f99\") " Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.357271 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.357354 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/391c4453-ed09-4305-a808-ede0ea232f99-kubelet-dir\") pod \"391c4453-ed09-4305-a808-ede0ea232f99\" (UID: 
\"391c4453-ed09-4305-a808-ede0ea232f99\") " Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.357716 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/391c4453-ed09-4305-a808-ede0ea232f99-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "391c4453-ed09-4305-a808-ede0ea232f99" (UID: "391c4453-ed09-4305-a808-ede0ea232f99"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.357789 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.85777269 +0000 UTC m=+129.871835149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.369813 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/391c4453-ed09-4305-a808-ede0ea232f99-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "391c4453-ed09-4305-a808-ede0ea232f99" (UID: "391c4453-ed09-4305-a808-ede0ea232f99"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.458928 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.459207 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:05.959195402 +0000 UTC m=+129.973257861 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.459299 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/391c4453-ed09-4305-a808-ede0ea232f99-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.459311 5119 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/391c4453-ed09-4305-a808-ede0ea232f99-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.532975 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:12:05 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:12:05 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:12:05 crc kubenswrapper[5119]: healthz check failed Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.533042 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.560220 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.560408 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.060364118 +0000 UTC m=+130.074426577 (durationBeforeRetry 500ms). 
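
The recurring router failure above is a startup probe: the kubelet issues an HTTP GET against the container's health endpoint, counts any status outside 200-399 as a failure, and logs the start of the response body, here healthz-style check lines ([-]backend-http and [-]has-synced failing, [+]process-running ok). A minimal sketch of that probe shape, with a hypothetical health URL:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    	"time"
    )

    func probe(url string) (ok bool, body string, err error) {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get(url)
    	if err != nil {
    		return false, "", err
    	}
    	defer resp.Body.Close()
    	// Keep only the start of the body, as the kubelet's log line does.
    	head, _ := io.ReadAll(io.LimitReader(resp.Body, 256))
    	return resp.StatusCode >= 200 && resp.StatusCode < 400, string(head), nil
    }

    func main() {
    	ok, body, err := probe("http://127.0.0.1:1936/healthz/ready") // hypothetical router health URL
    	fmt.Println(ok, err)
    	fmt.Println(body)
    }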
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.560792 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.561214 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.061198898 +0000 UTC m=+130.075261357 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.662860 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.663090 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.16304725 +0000 UTC m=+130.177109739 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.663624 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.664063 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.164044494 +0000 UTC m=+130.178106953 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.765683 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.765911 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.265875276 +0000 UTC m=+130.279937885 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.867950 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.868446 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.368424745 +0000 UTC m=+130.382487214 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:05 crc kubenswrapper[5119]: I0130 00:12:05.969142 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:05 crc kubenswrapper[5119]: E0130 00:12:05.969505 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.469460227 +0000 UTC m=+130.483522696 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.059070 5119 generic.go:358] "Generic (PLEG): container finished" podID="af9d2341-a591-42b6-91e5-8177924152b8" containerID="ae1f37d2126139d5c321717de3c7fca46efe21bba09cc3a90a417fad16f14157" exitCode=0 Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.059209 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerDied","Data":"ae1f37d2126139d5c321717de3c7fca46efe21bba09cc3a90a417fad16f14157"} Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.061791 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"391c4453-ed09-4305-a808-ede0ea232f99","Type":"ContainerDied","Data":"3e6594ef8413e7553a920ecd84ff7482093f33d22c770392a03802e6acb6bef7"} Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.061829 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.061855 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e6594ef8413e7553a920ecd84ff7482093f33d22c770392a03802e6acb6bef7" Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.063769 5119 generic.go:358] "Generic (PLEG): container finished" podID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerID="13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc" exitCode=0 Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.063904 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerDied","Data":"13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc"} Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.069172 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-9ddfb9f55-9n8tq" Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.070936 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.071357 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.57133439 +0000 UTC m=+130.585396929 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.171872 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.172165 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.672055355 +0000 UTC m=+130.686117814 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.173545 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.175252 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.675235712 +0000 UTC m=+130.689298171 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.275062 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.275282 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.77525489 +0000 UTC m=+130.789317349 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.379208 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.379567 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.879554701 +0000 UTC m=+130.893617160 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.481055 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.481271 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.981241929 +0000 UTC m=+130.995304388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.481555 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.481970 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:06.981952366 +0000 UTC m=+130.996014825 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.533597 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:12:06 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld Jan 30 00:12:06 crc kubenswrapper[5119]: [+]process-running ok Jan 30 00:12:06 crc kubenswrapper[5119]: healthz check failed Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.533701 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.582429 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.582644 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.08261223 +0000 UTC m=+131.096674699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.583058 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.583553 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.083542632 +0000 UTC m=+131.097605091 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.684737 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.685063 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.185047606 +0000 UTC m=+131.199110075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.787562 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.792783 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.292763229 +0000 UTC m=+131.306825688 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.893768 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.894192 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.394174471 +0000 UTC m=+131.408236930 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:06 crc kubenswrapper[5119]: I0130 00:12:06.997141 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:06 crc kubenswrapper[5119]: E0130 00:12:06.997561 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.497537749 +0000 UTC m=+131.511600208 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.098012 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.098253 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.598213673 +0000 UTC m=+131.612276142 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.200134 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.200523 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.700511536 +0000 UTC m=+131.714573995 (durationBeforeRetry 500ms). 
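
Note the rhythm of the repeats around this point: the reconciler wakes roughly every 100ms (timestamps .098, .200, .302, .404 ...) and re-queues the unmount and mount operations, while the 500ms durationBeforeRetry gate keeps rejecting them in between, which is why the same stored error is re-logged several times per second. A minimal sketch of that two-rate loop, assuming a 100ms reconcile period inferred from the log cadence:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	nextAllowed := time.Now()
    	tick := time.NewTicker(100 * time.Millisecond) // reconcile period (assumed from the log)
    	defer tick.Stop()
    	for i := 0; i < 10; i++ {
    		<-tick.C
    		if time.Now().Before(nextAllowed) {
    			fmt.Println("reconcile: operation still backing off")
    			continue
    		}
    		fmt.Println("reconcile: retrying mount (fails: driver not registered)")
    		nextAllowed = time.Now().Add(500 * time.Millisecond) // durationBeforeRetry
    	}
    }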
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.302073 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.302260 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.802229365 +0000 UTC m=+131.816291844 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.302728 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.303299 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.803287371 +0000 UTC m=+131.817349830 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.404675 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.404868 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.904837973 +0000 UTC m=+131.918900432 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.405360 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.405817 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:07.905800237 +0000 UTC m=+131.919862696 (durationBeforeRetry 500ms). 
Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.507729 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.508526 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:08.008442844 +0000 UTC m=+132.022505303 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.534004 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:12:07 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:12:07 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:12:07 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.534082 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:12:07 crc kubenswrapper[5119]: I0130 00:12:07.609237 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:07 crc kubenswrapper[5119]: E0130 00:12:07.610045 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:08.110019416 +0000 UTC m=+132.124081875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:08 crc kubenswrapper[5119]: I0130 00:12:08.534741 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:12:08 crc kubenswrapper[5119]: [-]has-synced failed: reason withheld
Jan 30 00:12:08 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:12:08 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:12:08 crc kubenswrapper[5119]: I0130 00:12:08.534801 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:12:08 crc kubenswrapper[5119]: I0130 00:12:08.539195 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:08 crc kubenswrapper[5119]: E0130 00:12:08.539448 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:09.03942886 +0000 UTC m=+133.053491309 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:08 crc kubenswrapper[5119]: I0130 00:12:08.640270 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:08 crc kubenswrapper[5119]: E0130 00:12:08.640643 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:09.140622823 +0000 UTC m=+133.154685282 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.119008 5119 ???:1] "http: TLS handshake error from 192.168.126.11:51164: no serving certificate available for the kubelet"
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.150819 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:09 crc kubenswrapper[5119]: E0130 00:12:09.151109 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:09.651091413 +0000 UTC m=+133.665153862 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.252867 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:09 crc kubenswrapper[5119]: E0130 00:12:09.253243 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:09.753226359 +0000 UTC m=+133.767288818 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
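The "no serving certificate available for the kubelet" handshake error above typically appears while the kubelet's serving-certificate CSR is still unapproved. A minimal sketch of how one might list kubelet-serving CSRs and their approval state with the official kubernetes Python client; the kubeconfig-based access is an assumption, not something recorded in this log:

    from kubernetes import client, config

    # Assumed access to the cluster via a local kubeconfig (hypothetical setup).
    config.load_kube_config()
    certs = client.CertificatesV1Api()

    # Kubelet serving certificates are requested under this well-known signer name.
    for csr in certs.list_certificate_signing_request().items:
        if csr.spec.signer_name != "kubernetes.io/kubelet-serving":
            continue
        conditions = [c.type for c in ((csr.status and csr.status.conditions) or [])]
        state = "Approved" if "Approved" in conditions else "Pending"
        print(csr.metadata.name, state)

Once such a CSR is approved and the certificate issued, handshake errors of this kind normally stop.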
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.532569 5119 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-tcmzw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:12:09 crc kubenswrapper[5119]: [+]has-synced ok
Jan 30 00:12:09 crc kubenswrapper[5119]: [+]process-running ok
Jan 30 00:12:09 crc kubenswrapper[5119]: healthz check failed
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.532631 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw" podUID="b7507dbb-ef53-4022-a311-17ba6d8b37a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.558667 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:09 crc kubenswrapper[5119]: E0130 00:12:09.558848 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:10.058820314 +0000 UTC m=+134.072882773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:09 crc kubenswrapper[5119]: I0130 00:12:09.559794 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:09 crc kubenswrapper[5119]: E0130 00:12:09.560182 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:10.060168307 +0000 UTC m=+134.074230766 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.272268 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:10 crc kubenswrapper[5119]: E0130 00:12:10.272780 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:10.772753706 +0000 UTC m=+134.786816155 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.294435 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-tgxk7"
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.374496 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:10 crc kubenswrapper[5119]: E0130 00:12:10.375864 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:10.875848874 +0000 UTC m=+134.889911383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.533243 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw"
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.536609 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-68cf44c8b8-tcmzw"
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.565867 5119 patch_prober.go:28] interesting pod/console-64d44f6ddf-fjtqb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body=
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.565931 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-console/console-64d44f6ddf-fjtqb" podUID="80f4b56b-65a5-40d2-9a12-0465c0ff492c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused"
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.581484 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:10 crc kubenswrapper[5119]: E0130 00:12:10.581720 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.081688593 +0000 UTC m=+135.095751052 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:10 crc kubenswrapper[5119]: I0130 00:12:10.682912 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:10 crc kubenswrapper[5119]: E0130 00:12:10.683365 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.183346676 +0000 UTC m=+135.197409135 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.128699 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-w8qxl"
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.191440 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:11 crc kubenswrapper[5119]: E0130 00:12:11.191666 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.691637354 +0000 UTC m=+135.705699813 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.192037 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:11 crc kubenswrapper[5119]: E0130 00:12:11.192328 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.6923197 +0000 UTC m=+135.706382159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.286116 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-bt8gg"
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.293508 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:11 crc kubenswrapper[5119]: E0130 00:12:11.293731 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.793696167 +0000 UTC m=+135.807758626 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.395150 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:11 crc kubenswrapper[5119]: E0130 00:12:11.395676 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.895654788 +0000 UTC m=+135.909717247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.471502 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.471576 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused"
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.471630 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-747b44746d-nts9m"
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.472154 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"2931176e63698b597fb7d7b475846b4ee8596e28f4e8f58aaab85d8027d44f04"} pod="openshift-console/downloads-747b44746d-nts9m" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.472213 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" containerID="cri-o://2931176e63698b597fb7d7b475846b4ee8596e28f4e8f58aaab85d8027d44f04" gracePeriod=2
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.472480 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.472571 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused"
Jan 30 00:12:11 crc kubenswrapper[5119]: I0130 00:12:11.496800 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:11 crc kubenswrapper[5119]: E0130 00:12:11.497032 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:11.997006404 +0000 UTC m=+136.011068893 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.033590 5119 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.110604 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.110950 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:12.610935841 +0000 UTC m=+136.624998300 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.121434 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" event={"ID":"6de3ce1c-697b-44eb-87b5-7365bab6606b","Type":"ContainerStarted","Data":"ec121ee63ecb898f3fe1e751f48a725db8dca256555ba874e8cbf66b71ad01e3"}
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.123651 5119 generic.go:358] "Generic (PLEG): container finished" podID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerID="2931176e63698b597fb7d7b475846b4ee8596e28f4e8f58aaab85d8027d44f04" exitCode=0
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.123729 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-nts9m" event={"ID":"ff01653b-8f2a-47a1-ae0c-0ac878c25570","Type":"ContainerDied","Data":"2931176e63698b597fb7d7b475846b4ee8596e28f4e8f58aaab85d8027d44f04"}
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.212884 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.213039 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:12.713006205 +0000 UTC m=+136.727068664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.213279 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.213991 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:12.713976588 +0000 UTC m=+136.728039037 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.234144 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.236557 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.238153 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.238218 5119 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins" probeResult="unknown"
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.316208 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.316796 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:12.81677656 +0000 UTC m=+136.830839019 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.417702 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.418177 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:12:12.918130126 +0000 UTC m=+136.932192585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-c5xkv" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.519222 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:12 crc kubenswrapper[5119]: E0130 00:12:12.519409 5119 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:12:13.019369769 +0000 UTC m=+137.033432228 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.873598 5119 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-30T00:12:12.033618842Z","UUID":"fe0dcfa0-4979-4bf5-9f7c-4bba4b9c4f63","Handler":null,"Name":"","Endpoint":""}
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.924933 5119 csi_plugin.go:106] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.926607 5119 csi_plugin.go:119] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.927475 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.938439 5119 csi_attacher.go:373] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.938486 5119 operation_generator.go:557] "MountVolume.MountDevice succeeded for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b1264ac67579ad07e7e9003054d44fe40dd55285a4b2f7dc74e48be1aee0868a/globalmount\"" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:12 crc kubenswrapper[5119]: I0130 00:12:12.965544 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-c5xkv\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:13 crc kubenswrapper[5119]: I0130 00:12:13.029331 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:12:13 crc kubenswrapper[5119]: I0130 00:12:13.055608 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"registry-dockercfg-6w67b\""
Jan 30 00:12:13 crc kubenswrapper[5119]: I0130 00:12:13.064615 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv"
Jan 30 00:12:13 crc kubenswrapper[5119]: I0130 00:12:13.069934 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (OuterVolumeSpecName: "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2". PluginName "kubernetes.io/csi", VolumeGIDValue ""
PluginName "kubernetes.io/csi", VolumeGIDValue "" Jan 30 00:12:13 crc kubenswrapper[5119]: I0130 00:12:13.388041 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:12:14 crc kubenswrapper[5119]: I0130 00:12:14.754844 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e9b5059-1b3e-4067-a63d-2952cbe863af" path="/var/lib/kubelet/pods/9e9b5059-1b3e-4067-a63d-2952cbe863af/volumes" Jan 30 00:12:20 crc kubenswrapper[5119]: I0130 00:12:20.564526 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:12:20 crc kubenswrapper[5119]: I0130 00:12:20.569166 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-64d44f6ddf-fjtqb" Jan 30 00:12:21 crc kubenswrapper[5119]: I0130 00:12:21.473345 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:21 crc kubenswrapper[5119]: I0130 00:12:21.473460 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:22 crc kubenswrapper[5119]: E0130 00:12:22.231911 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:22 crc kubenswrapper[5119]: E0130 00:12:22.234261 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:22 crc kubenswrapper[5119]: E0130 00:12:22.236169 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:22 crc kubenswrapper[5119]: E0130 00:12:22.236220 5119 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins" probeResult="unknown" Jan 30 00:12:29 crc kubenswrapper[5119]: I0130 00:12:29.625469 5119 ???:1] "http: TLS handshake error from 192.168.126.11:49104: no serving certificate available for the kubelet" Jan 30 00:12:31 crc kubenswrapper[5119]: I0130 00:12:31.473328 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure 
output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:31 crc kubenswrapper[5119]: I0130 00:12:31.473900 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:32 crc kubenswrapper[5119]: E0130 00:12:32.231244 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:32 crc kubenswrapper[5119]: E0130 00:12:32.232568 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:32 crc kubenswrapper[5119]: E0130 00:12:32.233763 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:32 crc kubenswrapper[5119]: E0130 00:12:32.233799 5119 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins" probeResult="unknown" Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.520453 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-12-crc"] Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.521038 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="391c4453-ed09-4305-a808-ede0ea232f99" containerName="pruner" Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.521054 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="391c4453-ed09-4305-a808-ede0ea232f99" containerName="pruner" Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.521094 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="516d6458-9038-4211-bc80-9bb37fc669a1" containerName="pruner" Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.521100 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="516d6458-9038-4211-bc80-9bb37fc669a1" containerName="pruner" Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.521197 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="516d6458-9038-4211-bc80-9bb37fc669a1" containerName="pruner" Jan 30 00:12:33 crc kubenswrapper[5119]: I0130 00:12:33.521208 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="391c4453-ed09-4305-a808-ede0ea232f99" containerName="pruner" Jan 30 00:12:35 crc kubenswrapper[5119]: I0130 00:12:35.250459 5119 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-lkcvr_32f760a1-1874-479d-810f-ac7785c7b94d/kube-multus-additional-cni-plugins/0.log" Jan 30 00:12:35 crc kubenswrapper[5119]: I0130 00:12:35.250500 5119 generic.go:358] "Generic (PLEG): container finished" podID="32f760a1-1874-479d-810f-ac7785c7b94d" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" exitCode=137 Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.732797 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-wmt9q" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.732896 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.737847 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.737950 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver\"/\"installer-sa-dockercfg-bqqnb\"" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.755676 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d52cae6f-022b-494d-bc48-0dab7b961ad2-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.755896 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d52cae6f-022b-494d-bc48-0dab7b961ad2-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.761891 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-12-crc"] Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.762010 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.762045 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" event={"ID":"32f760a1-1874-479d-810f-ac7785c7b94d","Type":"ContainerDied","Data":"63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0"} Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.856941 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d52cae6f-022b-494d-bc48-0dab7b961ad2-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.856994 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d52cae6f-022b-494d-bc48-0dab7b961ad2-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 
00:12:37.857063 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d52cae6f-022b-494d-bc48-0dab7b961ad2-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:37 crc kubenswrapper[5119]: I0130 00:12:37.883070 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d52cae6f-022b-494d-bc48-0dab7b961ad2-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:38 crc kubenswrapper[5119]: I0130 00:12:38.073314 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:41 crc kubenswrapper[5119]: I0130 00:12:41.473309 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:41 crc kubenswrapper[5119]: I0130 00:12:41.473945 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:42 crc kubenswrapper[5119]: E0130 00:12:42.228666 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0 is running failed: container process not found" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:42 crc kubenswrapper[5119]: E0130 00:12:42.229172 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0 is running failed: container process not found" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:42 crc kubenswrapper[5119]: E0130 00:12:42.229510 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0 is running failed: container process not found" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:42 crc kubenswrapper[5119]: E0130 00:12:42.229566 5119 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins" probeResult="unknown" Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.212433 5119 
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.466162 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-12-crc"]
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.466334 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.543112 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kubelet-dir\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.543166 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-var-lock\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.543197 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kube-api-access\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.644449 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kube-api-access\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.644586 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kubelet-dir\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.644605 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-var-lock\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.644664 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-var-lock\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.644827 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kubelet-dir\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.663471 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kube-api-access\") pod \"installer-12-crc\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.788947 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.843182 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-lkcvr_32f760a1-1874-479d-810f-ac7785c7b94d/kube-multus-additional-cni-plugins/0.log"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.843246 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr"
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.947516 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/32f760a1-1874-479d-810f-ac7785c7b94d-ready\") pod \"32f760a1-1874-479d-810f-ac7785c7b94d\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") "
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.947618 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgwxn\" (UniqueName: \"kubernetes.io/projected/32f760a1-1874-479d-810f-ac7785c7b94d-kube-api-access-wgwxn\") pod \"32f760a1-1874-479d-810f-ac7785c7b94d\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") "
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.947669 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32f760a1-1874-479d-810f-ac7785c7b94d-tuning-conf-dir\") pod \"32f760a1-1874-479d-810f-ac7785c7b94d\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") "
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.947700 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32f760a1-1874-479d-810f-ac7785c7b94d-cni-sysctl-allowlist\") pod \"32f760a1-1874-479d-810f-ac7785c7b94d\" (UID: \"32f760a1-1874-479d-810f-ac7785c7b94d\") "
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.947707 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/32f760a1-1874-479d-810f-ac7785c7b94d-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "32f760a1-1874-479d-810f-ac7785c7b94d" (UID: "32f760a1-1874-479d-810f-ac7785c7b94d"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.947992 5119 reconciler_common.go:299] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32f760a1-1874-479d-810f-ac7785c7b94d-tuning-conf-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.948086 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32f760a1-1874-479d-810f-ac7785c7b94d-ready" (OuterVolumeSpecName: "ready") pod "32f760a1-1874-479d-810f-ac7785c7b94d" (UID: "32f760a1-1874-479d-810f-ac7785c7b94d"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
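The mount sequence above follows the volume manager's usual order per volume: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. The UniqueName prefixes identify the plugin: kubernetes.io/host-path for kubelet-dir and var-lock, kubernetes.io/projected for kube-api-access. A sketch of pod volumes that would exercise those two plugin types; the volume names mirror the log, but the hostPath locations and token path are illustrative assumptions, not read from the real pod spec:

package main

import (
	corev1 "k8s.io/api/core/v1"
)

// Volumes of the two plugin types named in the UniqueName strings above.
func installerVolumes() []corev1.Volume {
	return []corev1.Volume{
		{
			Name: "kubelet-dir",
			VolumeSource: corev1.VolumeSource{
				// Assumed path; the log only reveals the plugin and volume name.
				HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/kubelet"},
			},
		},
		{
			Name: "var-lock",
			VolumeSource: corev1.VolumeSource{
				HostPath: &corev1.HostPathVolumeSource{Path: "/var/lock"},
			},
		},
		{
			// A projected service-account token volume, the usual source of
			// the kube-api-access volumes seen throughout this log.
			Name: "kube-api-access",
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: []corev1.VolumeProjection{
						{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
					},
				},
			},
		},
	}
}

func main() { _ = installerVolumes() }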
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.948267 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32f760a1-1874-479d-810f-ac7785c7b94d-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "32f760a1-1874-479d-810f-ac7785c7b94d" (UID: "32f760a1-1874-479d-810f-ac7785c7b94d"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:12:44 crc kubenswrapper[5119]: I0130 00:12:44.951529 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32f760a1-1874-479d-810f-ac7785c7b94d-kube-api-access-wgwxn" (OuterVolumeSpecName: "kube-api-access-wgwxn") pod "32f760a1-1874-479d-810f-ac7785c7b94d" (UID: "32f760a1-1874-479d-810f-ac7785c7b94d"). InnerVolumeSpecName "kube-api-access-wgwxn". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.049289 5119 reconciler_common.go:299] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/32f760a1-1874-479d-810f-ac7785c7b94d-ready\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.049667 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-wgwxn\" (UniqueName: \"kubernetes.io/projected/32f760a1-1874-479d-810f-ac7785c7b94d-kube-api-access-wgwxn\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.049681 5119 reconciler_common.go:299] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32f760a1-1874-479d-810f-ac7785c7b94d-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.226334 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-c5xkv"] Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.298689 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-12-crc"] Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.305231 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" event={"ID":"6de3ce1c-697b-44eb-87b5-7365bab6606b","Type":"ContainerStarted","Data":"1cd96899eea0a6789fd2fd47b24f80555d2bf5c4749dbe290fa40661335eb973"} Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.306781 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-lkcvr_32f760a1-1874-479d-810f-ac7785c7b94d/kube-multus-additional-cni-plugins/0.log" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.306826 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" event={"ID":"32f760a1-1874-479d-810f-ac7785c7b94d","Type":"ContainerDied","Data":"bc6f4d5edc3898b52aeedf570bed40613d9eaf302be07882b5e49e0c97178643"} Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.306964 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-lkcvr" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.307871 5119 scope.go:117] "RemoveContainer" containerID="63723f0acde8b4a62af093d6111b39fbecb03b048a2891e5d37a85c4939670f0" Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.330994 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-lkcvr"] Jan 30 00:12:45 crc kubenswrapper[5119]: I0130 00:12:45.333603 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-lkcvr"] Jan 30 00:12:46 crc kubenswrapper[5119]: I0130 00:12:46.972984 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" podStartSLOduration=67.972965288 podStartE2EDuration="1m7.972965288s" podCreationTimestamp="2026-01-30 00:11:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:46.968265893 +0000 UTC m=+170.982328392" watchObservedRunningTime="2026-01-30 00:12:46.972965288 +0000 UTC m=+170.987027747" Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.757508 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.757601 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.957324 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" path="/var/lib/kubelet/pods/32f760a1-1874-479d-810f-ac7785c7b94d/volumes" Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.958081 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-12-crc"] Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.958118 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.958207 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkvpm" event={"ID":"6de3ce1c-697b-44eb-87b5-7365bab6606b","Type":"ContainerStarted","Data":"62f3365ab5e9cc6a8eb3b43cf2b04ddb81402354af53aa7a3c5eff476d5dd82c"} Jan 30 00:12:47 crc kubenswrapper[5119]: I0130 00:12:47.958226 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-nts9m" event={"ID":"ff01653b-8f2a-47a1-ae0c-0ac878c25570","Type":"ContainerStarted","Data":"2219fc3e88342e5a75629a3c1792df7c0bc7a93155946a8e442892451fae96e0"} Jan 30 00:12:48 crc kubenswrapper[5119]: I0130 00:12:48.335350 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:48 crc kubenswrapper[5119]: I0130 00:12:48.335515 5119 prober.go:120] 
"Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:48 crc kubenswrapper[5119]: W0130 00:12:48.865204 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a0a210e_8e3a_452a_b20c_a2e2f11bf3a6.slice/crio-bd6bf265a0de281cc350dda6da518af22a0777e0c249a0e11846caf62c4dc90f WatchSource:0}: Error finding container bd6bf265a0de281cc350dda6da518af22a0777e0c249a0e11846caf62c4dc90f: Status 404 returned error can't find the container with id bd6bf265a0de281cc350dda6da518af22a0777e0c249a0e11846caf62c4dc90f Jan 30 00:12:48 crc kubenswrapper[5119]: W0130 00:12:48.873185 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod0f34372a_8c67_4ddf_85e2_14c85e52fc7e.slice/crio-0ea66dbc4d001575c448ad97f196654ca28bcb790df7cd5025e9a152ab6d9d4c WatchSource:0}: Error finding container 0ea66dbc4d001575c448ad97f196654ca28bcb790df7cd5025e9a152ab6d9d4c: Status 404 returned error can't find the container with id 0ea66dbc4d001575c448ad97f196654ca28bcb790df7cd5025e9a152ab6d9d4c Jan 30 00:12:49 crc kubenswrapper[5119]: I0130 00:12:49.341324 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"0f34372a-8c67-4ddf-85e2-14c85e52fc7e","Type":"ContainerStarted","Data":"0ea66dbc4d001575c448ad97f196654ca28bcb790df7cd5025e9a152ab6d9d4c"} Jan 30 00:12:49 crc kubenswrapper[5119]: I0130 00:12:49.343212 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" event={"ID":"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6","Type":"ContainerStarted","Data":"bd6bf265a0de281cc350dda6da518af22a0777e0c249a0e11846caf62c4dc90f"} Jan 30 00:12:49 crc kubenswrapper[5119]: I0130 00:12:49.344268 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"d52cae6f-022b-494d-bc48-0dab7b961ad2","Type":"ContainerStarted","Data":"3be20d9b01287da468bb1f9b98d2b61799653c741b9658bb40ed9b400c621810"} Jan 30 00:12:51 crc kubenswrapper[5119]: I0130 00:12:51.471661 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:51 crc kubenswrapper[5119]: I0130 00:12:51.472309 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:52 crc kubenswrapper[5119]: I0130 00:12:52.362279 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerStarted","Data":"2a5bff2313083f8acb1544588a14d492cd240afd60f9b29b255c427c2984b130"} Jan 30 00:12:53 crc kubenswrapper[5119]: I0130 00:12:53.369257 5119 generic.go:358] "Generic (PLEG): container finished" podID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" 
containerID="2a5bff2313083f8acb1544588a14d492cd240afd60f9b29b255c427c2984b130" exitCode=0 Jan 30 00:12:53 crc kubenswrapper[5119]: I0130 00:12:53.369305 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerDied","Data":"2a5bff2313083f8acb1544588a14d492cd240afd60f9b29b255c427c2984b130"} Jan 30 00:12:54 crc kubenswrapper[5119]: I0130 00:12:54.383628 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerStarted","Data":"170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.393806 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerStarted","Data":"d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.396453 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerStarted","Data":"a38e2183b5d6096ef28362cd93f4d6442a95115c1ceabd026cab80b1c81f4c4b"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.398577 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"0f34372a-8c67-4ddf-85e2-14c85e52fc7e","Type":"ContainerStarted","Data":"06af590206f86f3302d04d8b01c0ba33c17ec1ba40ceedaffe1bb9cf7fe275a1"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.400759 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerStarted","Data":"1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.402794 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerStarted","Data":"4dddee7f49d725579f1eeae60e88826d7fa3a2c6be6656cb224285b2a8deee2a"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.404528 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" event={"ID":"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6","Type":"ContainerStarted","Data":"d32a8a187a68cc7cb3989c18cff3ebdd551562df85fb9ce5ba423fdfc0b302b3"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.406329 5119 generic.go:358] "Generic (PLEG): container finished" podID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerID="170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408" exitCode=0 Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.406452 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerDied","Data":"170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408"} Jan 30 00:12:55 crc kubenswrapper[5119]: I0130 00:12:55.408627 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" 
event={"ID":"d52cae6f-022b-494d-bc48-0dab7b961ad2","Type":"ContainerStarted","Data":"1793af5fbcc393b807088f204450d9f9fdf9c838c2332d24562ae9e47b8051e1"} Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.418304 5119 generic.go:358] "Generic (PLEG): container finished" podID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerID="d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2" exitCode=0 Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.418413 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerDied","Data":"d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2"} Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.420943 5119 generic.go:358] "Generic (PLEG): container finished" podID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerID="a38e2183b5d6096ef28362cd93f4d6442a95115c1ceabd026cab80b1c81f4c4b" exitCode=0 Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.421028 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerDied","Data":"a38e2183b5d6096ef28362cd93f4d6442a95115c1ceabd026cab80b1c81f4c4b"} Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.423014 5119 generic.go:358] "Generic (PLEG): container finished" podID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerID="1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f" exitCode=0 Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.423044 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerDied","Data":"1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f"} Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.424775 5119 generic.go:358] "Generic (PLEG): container finished" podID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerID="4dddee7f49d725579f1eeae60e88826d7fa3a2c6be6656cb224285b2a8deee2a" exitCode=0 Jan 30 00:12:56 crc kubenswrapper[5119]: I0130 00:12:56.424883 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerDied","Data":"4dddee7f49d725579f1eeae60e88826d7fa3a2c6be6656cb224285b2a8deee2a"} Jan 30 00:12:58 crc kubenswrapper[5119]: I0130 00:12:58.336298 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:12:58 crc kubenswrapper[5119]: I0130 00:12:58.336813 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:12:59 crc kubenswrapper[5119]: I0130 00:12:59.444078 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerStarted","Data":"ebacac0d3bbc090ee3fd1802367960fb118415ac2bbcc63a00b06c42140e46f3"} Jan 30 00:12:59 crc kubenswrapper[5119]: I0130 00:12:59.445905 5119 
generic.go:358] "Generic (PLEG): container finished" podID="d52cae6f-022b-494d-bc48-0dab7b961ad2" containerID="1793af5fbcc393b807088f204450d9f9fdf9c838c2332d24562ae9e47b8051e1" exitCode=0 Jan 30 00:12:59 crc kubenswrapper[5119]: I0130 00:12:59.446049 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"d52cae6f-022b-494d-bc48-0dab7b961ad2","Type":"ContainerDied","Data":"1793af5fbcc393b807088f204450d9f9fdf9c838c2332d24562ae9e47b8051e1"} Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.453237 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerStarted","Data":"36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9"} Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.454660 5119 generic.go:358] "Generic (PLEG): container finished" podID="af9d2341-a591-42b6-91e5-8177924152b8" containerID="ebacac0d3bbc090ee3fd1802367960fb118415ac2bbcc63a00b06c42140e46f3" exitCode=0 Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.454822 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerDied","Data":"ebacac0d3bbc090ee3fd1802367960fb118415ac2bbcc63a00b06c42140e46f3"} Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.679365 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.775018 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d52cae6f-022b-494d-bc48-0dab7b961ad2-kubelet-dir\") pod \"d52cae6f-022b-494d-bc48-0dab7b961ad2\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.775124 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d52cae6f-022b-494d-bc48-0dab7b961ad2-kube-api-access\") pod \"d52cae6f-022b-494d-bc48-0dab7b961ad2\" (UID: \"d52cae6f-022b-494d-bc48-0dab7b961ad2\") " Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.775138 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d52cae6f-022b-494d-bc48-0dab7b961ad2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d52cae6f-022b-494d-bc48-0dab7b961ad2" (UID: "d52cae6f-022b-494d-bc48-0dab7b961ad2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.775319 5119 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d52cae6f-022b-494d-bc48-0dab7b961ad2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.780233 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d52cae6f-022b-494d-bc48-0dab7b961ad2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d52cae6f-022b-494d-bc48-0dab7b961ad2" (UID: "d52cae6f-022b-494d-bc48-0dab7b961ad2"). InnerVolumeSpecName "kube-api-access". 
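The ContainerStarted/ContainerDied pairs above are the kubelet's PLEG relisting surfacing CRI state changes; from the API side the same transitions show up in pod container statuses. A sketch of observing them with client-go; the in-cluster config and the openshift-marketplace namespace are assumptions for illustration:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes this runs inside the cluster
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	w, err := cs.CoreV1().Pods("openshift-marketplace").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			continue
		}
		for _, st := range pod.Status.ContainerStatuses {
			if st.State.Terminated != nil {
				// Mirrors the "container finished ... exitCode=0" lines in the log.
				fmt.Printf("%s/%s exited %d\n", pod.Name, st.Name, st.State.Terminated.ExitCode)
			}
		}
	}
}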
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:00 crc kubenswrapper[5119]: I0130 00:13:00.876699 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d52cae6f-022b-494d-bc48-0dab7b961ad2-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.207321 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" podStartSLOduration=165.207302427 podStartE2EDuration="2m45.207302427s" podCreationTimestamp="2026-01-30 00:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:13:01.205477192 +0000 UTC m=+185.219539661" watchObservedRunningTime="2026-01-30 00:13:01.207302427 +0000 UTC m=+185.221364896" Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.226030 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-12-crc" podStartSLOduration=17.226006554 podStartE2EDuration="17.226006554s" podCreationTimestamp="2026-01-30 00:12:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:13:01.221587056 +0000 UTC m=+185.235649515" watchObservedRunningTime="2026-01-30 00:13:01.226006554 +0000 UTC m=+185.240069023" Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.461567 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"d52cae6f-022b-494d-bc48-0dab7b961ad2","Type":"ContainerDied","Data":"3be20d9b01287da468bb1f9b98d2b61799653c741b9658bb40ed9b400c621810"} Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.462759 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3be20d9b01287da468bb1f9b98d2b61799653c741b9658bb40ed9b400c621810" Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.461784 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.471687 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:01 crc kubenswrapper[5119]: I0130 00:13:01.471743 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:02 crc kubenswrapper[5119]: I0130 00:13:02.468377 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerStarted","Data":"7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6"} Jan 30 00:13:03 crc kubenswrapper[5119]: I0130 00:13:03.474767 5119 generic.go:358] "Generic (PLEG): container finished" podID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerID="36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9" exitCode=0 Jan 30 00:13:03 crc kubenswrapper[5119]: I0130 00:13:03.474838 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerDied","Data":"36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9"} Jan 30 00:13:03 crc kubenswrapper[5119]: I0130 00:13:03.477763 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerStarted","Data":"d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb"} Jan 30 00:13:03 crc kubenswrapper[5119]: I0130 00:13:03.489156 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerStarted","Data":"a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604"} Jan 30 00:13:04 crc kubenswrapper[5119]: I0130 00:13:04.320930 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xmlxg" podStartSLOduration=22.574827613 podStartE2EDuration="1m2.32091048s" podCreationTimestamp="2026-01-30 00:12:02 +0000 UTC" firstStartedPulling="2026-01-30 00:12:05.051375793 +0000 UTC m=+129.065438252" lastFinishedPulling="2026-01-30 00:12:44.79745866 +0000 UTC m=+168.811521119" observedRunningTime="2026-01-30 00:13:04.319557247 +0000 UTC m=+188.333619716" watchObservedRunningTime="2026-01-30 00:13:04.32091048 +0000 UTC m=+188.334972959" Jan 30 00:13:04 crc kubenswrapper[5119]: I0130 00:13:04.495442 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerStarted","Data":"44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283"} Jan 30 00:13:05 crc kubenswrapper[5119]: I0130 00:13:05.342125 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jqrl2" podStartSLOduration=24.351747111 podStartE2EDuration="1m5.342111578s" 
podCreationTimestamp="2026-01-30 00:12:00 +0000 UTC" firstStartedPulling="2026-01-30 00:12:04.018070457 +0000 UTC m=+128.032132916" lastFinishedPulling="2026-01-30 00:12:45.008434914 +0000 UTC m=+169.022497383" observedRunningTime="2026-01-30 00:13:05.340504439 +0000 UTC m=+189.354566918" watchObservedRunningTime="2026-01-30 00:13:05.342111578 +0000 UTC m=+189.356174037" Jan 30 00:13:06 crc kubenswrapper[5119]: I0130 00:13:06.507475 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerStarted","Data":"08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6"} Jan 30 00:13:07 crc kubenswrapper[5119]: I0130 00:13:07.516777 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerStarted","Data":"bc32b5ec6ff0f408bf414bbbdbcdba5b7269229a507bbead3adcf7b1f7b84a39"} Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.092092 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-29jhj" podStartSLOduration=28.068063747 podStartE2EDuration="1m8.092072599s" podCreationTimestamp="2026-01-30 00:12:00 +0000 UTC" firstStartedPulling="2026-01-30 00:12:05.038845362 +0000 UTC m=+129.052907821" lastFinishedPulling="2026-01-30 00:12:45.062854214 +0000 UTC m=+169.076916673" observedRunningTime="2026-01-30 00:13:06.93737961 +0000 UTC m=+190.951442089" watchObservedRunningTime="2026-01-30 00:13:08.092072599 +0000 UTC m=+192.106135058" Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.106365 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xrxfc" podStartSLOduration=26.095712354 podStartE2EDuration="1m6.106352428s" podCreationTimestamp="2026-01-30 00:12:02 +0000 UTC" firstStartedPulling="2026-01-30 00:12:05.032518479 +0000 UTC m=+129.046580938" lastFinishedPulling="2026-01-30 00:12:45.043158553 +0000 UTC m=+169.057221012" observedRunningTime="2026-01-30 00:13:08.090129161 +0000 UTC m=+192.104191631" watchObservedRunningTime="2026-01-30 00:13:08.106352428 +0000 UTC m=+192.120414887" Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.107821 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2jg89" podStartSLOduration=27.05831132 podStartE2EDuration="1m8.107784483s" podCreationTimestamp="2026-01-30 00:12:00 +0000 UTC" firstStartedPulling="2026-01-30 00:12:03.965123992 +0000 UTC m=+127.979186451" lastFinishedPulling="2026-01-30 00:12:45.014597165 +0000 UTC m=+169.028659614" observedRunningTime="2026-01-30 00:13:08.104688067 +0000 UTC m=+192.118750526" watchObservedRunningTime="2026-01-30 00:13:08.107784483 +0000 UTC m=+192.121846942" Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.335975 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.336334 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get 
\"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.524431 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerStarted","Data":"c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43"} Jan 30 00:13:08 crc kubenswrapper[5119]: I0130 00:13:08.545801 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z9zsl" podStartSLOduration=27.499104927 podStartE2EDuration="1m8.545783083s" podCreationTimestamp="2026-01-30 00:12:00 +0000 UTC" firstStartedPulling="2026-01-30 00:12:03.967914779 +0000 UTC m=+127.981977238" lastFinishedPulling="2026-01-30 00:12:45.014592925 +0000 UTC m=+169.028655394" observedRunningTime="2026-01-30 00:13:08.540260328 +0000 UTC m=+192.554322797" watchObservedRunningTime="2026-01-30 00:13:08.545783083 +0000 UTC m=+192.559845552" Jan 30 00:13:09 crc kubenswrapper[5119]: I0130 00:13:09.531432 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerStarted","Data":"8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023"} Jan 30 00:13:09 crc kubenswrapper[5119]: I0130 00:13:09.553125 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vf8tm" podStartSLOduration=21.839793673 podStartE2EDuration="1m6.553091861s" podCreationTimestamp="2026-01-30 00:12:03 +0000 UTC" firstStartedPulling="2026-01-30 00:12:07.069682126 +0000 UTC m=+131.083744585" lastFinishedPulling="2026-01-30 00:12:51.782980284 +0000 UTC m=+175.797042773" observedRunningTime="2026-01-30 00:13:09.547225307 +0000 UTC m=+193.561287766" watchObservedRunningTime="2026-01-30 00:13:09.553091861 +0000 UTC m=+193.567154350" Jan 30 00:13:09 crc kubenswrapper[5119]: I0130 00:13:09.563949 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-889qp" podStartSLOduration=21.783839785 podStartE2EDuration="1m6.563920835s" podCreationTimestamp="2026-01-30 00:12:03 +0000 UTC" firstStartedPulling="2026-01-30 00:12:07.072614417 +0000 UTC m=+131.086676876" lastFinishedPulling="2026-01-30 00:12:51.852695467 +0000 UTC m=+175.866757926" observedRunningTime="2026-01-30 00:13:09.562603193 +0000 UTC m=+193.576665662" watchObservedRunningTime="2026-01-30 00:13:09.563920835 +0000 UTC m=+193.577983314" Jan 30 00:13:10 crc kubenswrapper[5119]: I0130 00:13:10.607903 5119 ???:1] "http: TLS handshake error from 192.168.126.11:59080: no serving certificate available for the kubelet" Jan 30 00:13:10 crc kubenswrapper[5119]: I0130 00:13:10.850274 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:13:10 crc kubenswrapper[5119]: I0130 00:13:10.850359 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:13:10 crc kubenswrapper[5119]: I0130 00:13:10.989641 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:13:10 crc kubenswrapper[5119]: I0130 00:13:10.989703 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" 
pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.184995 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.189732 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.322846 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z9zsl" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.322893 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-z9zsl" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.471122 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.471185 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.471233 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.471692 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"2219fc3e88342e5a75629a3c1792df7c0bc7a93155946a8e442892451fae96e0"} pod="openshift-console/downloads-747b44746d-nts9m" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.471733 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" containerID="cri-o://2219fc3e88342e5a75629a3c1792df7c0bc7a93155946a8e442892451fae96e0" gracePeriod=2 Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.471959 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.472026 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.528048 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-29jhj" Jan 30 00:13:11 crc kubenswrapper[5119]: I0130 00:13:11.528149 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" 
pod="openshift-marketplace/community-operators-29jhj" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.402599 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.405054 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.405731 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-29jhj" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.406031 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z9zsl" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.446246 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.448917 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z9zsl" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.450683 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-29jhj" Jan 30 00:13:12 crc kubenswrapper[5119]: I0130 00:13:12.455334 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.167899 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.167958 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.203816 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.374263 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.374365 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.411352 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.599474 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.600410 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.990972 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:13:13 crc kubenswrapper[5119]: I0130 00:13:13.991384 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.296585 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-vf8tm"
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.297109 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vf8tm"
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.332800 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vf8tm"
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.569806 5119 generic.go:358] "Generic (PLEG): container finished" podID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerID="2219fc3e88342e5a75629a3c1792df7c0bc7a93155946a8e442892451fae96e0" exitCode=0
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.569879 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-nts9m" event={"ID":"ff01653b-8f2a-47a1-ae0c-0ac878c25570","Type":"ContainerDied","Data":"2219fc3e88342e5a75629a3c1792df7c0bc7a93155946a8e442892451fae96e0"}
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.569930 5119 scope.go:117] "RemoveContainer" containerID="2931176e63698b597fb7d7b475846b4ee8596e28f4e8f58aaab85d8027d44f04"
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.610241 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vf8tm"
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.667462 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-889qp"
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.703820 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9zsl"]
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.704282 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z9zsl" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="registry-server" containerID="cri-o://bc32b5ec6ff0f408bf414bbbdbcdba5b7269229a507bbead3adcf7b1f7b84a39" gracePeriod=2
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.897892 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29jhj"]
Jan 30 00:13:14 crc kubenswrapper[5119]: I0130 00:13:14.898417 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/community-operators-29jhj" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="registry-server" containerID="cri-o://d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb" gracePeriod=2
Jan 30 00:13:17 crc kubenswrapper[5119]: I0130 00:13:17.096494 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmlxg"]
Jan 30 00:13:17 crc kubenswrapper[5119]: I0130 00:13:17.097673 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xmlxg" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="registry-server" containerID="cri-o://7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6" gracePeriod=2
Jan 30 00:13:17 crc kubenswrapper[5119]: I0130 00:13:17.295635 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vf8tm"]
Jan 30 00:13:17 crc kubenswrapper[5119]: I0130 00:13:17.589097 5119 generic.go:358] "Generic (PLEG): container finished" podID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerID="bc32b5ec6ff0f408bf414bbbdbcdba5b7269229a507bbead3adcf7b1f7b84a39" exitCode=0
Jan 30 00:13:17 crc kubenswrapper[5119]: I0130 00:13:17.589182 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerDied","Data":"bc32b5ec6ff0f408bf414bbbdbcdba5b7269229a507bbead3adcf7b1f7b84a39"}
Jan 30 00:13:17 crc kubenswrapper[5119]: I0130 00:13:17.589757 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vf8tm" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="registry-server" containerID="cri-o://c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43" gracePeriod=2
Jan 30 00:13:18 crc kubenswrapper[5119]: I0130 00:13:18.596185 5119 generic.go:358] "Generic (PLEG): container finished" podID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerID="d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb" exitCode=0
Jan 30 00:13:18 crc kubenswrapper[5119]: I0130 00:13:18.596383 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerDied","Data":"d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb"}
Jan 30 00:13:18 crc kubenswrapper[5119]: I0130 00:13:18.598926 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-nts9m" event={"ID":"ff01653b-8f2a-47a1-ae0c-0ac878c25570","Type":"ContainerStarted","Data":"3b09d8d90d1c16f7e87ad6229aa5486c9d84b32fc3343d4f1a7a5048ee98624c"}
Jan 30 00:13:18 crc kubenswrapper[5119]: I0130 00:13:18.993865 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.123159 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-catalog-content\") pod \"868949ba-fadb-4cc1-88d4-503a4049f66d\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") "
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.123237 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8q82v\" (UniqueName: \"kubernetes.io/projected/868949ba-fadb-4cc1-88d4-503a4049f66d-kube-api-access-8q82v\") pod \"868949ba-fadb-4cc1-88d4-503a4049f66d\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") "
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.123360 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-utilities\") pod \"868949ba-fadb-4cc1-88d4-503a4049f66d\" (UID: \"868949ba-fadb-4cc1-88d4-503a4049f66d\") "
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.125084 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-utilities" (OuterVolumeSpecName: "utilities") pod "868949ba-fadb-4cc1-88d4-503a4049f66d" (UID: "868949ba-fadb-4cc1-88d4-503a4049f66d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.129996 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.155893 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/868949ba-fadb-4cc1-88d4-503a4049f66d-kube-api-access-8q82v" (OuterVolumeSpecName: "kube-api-access-8q82v") pod "868949ba-fadb-4cc1-88d4-503a4049f66d" (UID: "868949ba-fadb-4cc1-88d4-503a4049f66d"). InnerVolumeSpecName "kube-api-access-8q82v". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.230845 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8q82v\" (UniqueName: \"kubernetes.io/projected/868949ba-fadb-4cc1-88d4-503a4049f66d-kube-api-access-8q82v\") on node \"crc\" DevicePath \"\""
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.607818 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zsl" event={"ID":"868949ba-fadb-4cc1-88d4-503a4049f66d","Type":"ContainerDied","Data":"0448985a2c6ebcbc7c59cfc3dd291703bbc70a751a7e1cbe1e7f96022797737e"}
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.607871 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9zsl"
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.607902 5119 scope.go:117] "RemoveContainer" containerID="bc32b5ec6ff0f408bf414bbbdbcdba5b7269229a507bbead3adcf7b1f7b84a39"
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.626036 5119 scope.go:117] "RemoveContainer" containerID="a38e2183b5d6096ef28362cd93f4d6442a95115c1ceabd026cab80b1c81f4c4b"
Jan 30 00:13:19 crc kubenswrapper[5119]: I0130 00:13:19.639136 5119 scope.go:117] "RemoveContainer" containerID="865a96337a7d93d869b72c4e68ee626bfc37e130ff1192f6fdd9ea9fee137e83"
Jan 30 00:13:20 crc kubenswrapper[5119]: I0130 00:13:20.625040 5119 generic.go:358] "Generic (PLEG): container finished" podID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerID="7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6" exitCode=0
Jan 30 00:13:20 crc kubenswrapper[5119]: I0130 00:13:20.625122 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerDied","Data":"7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6"}
Jan 30 00:13:20 crc kubenswrapper[5119]: I0130 00:13:20.655911 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "868949ba-fadb-4cc1-88d4-503a4049f66d" (UID: "868949ba-fadb-4cc1-88d4-503a4049f66d"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:20 crc kubenswrapper[5119]: I0130 00:13:20.755165 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868949ba-fadb-4cc1-88d4-503a4049f66d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:20 crc kubenswrapper[5119]: I0130 00:13:20.836915 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9zsl"] Jan 30 00:13:20 crc kubenswrapper[5119]: I0130 00:13:20.840378 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z9zsl"] Jan 30 00:13:21 crc kubenswrapper[5119]: I0130 00:13:21.472307 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:21 crc kubenswrapper[5119]: I0130 00:13:21.472364 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:21 crc kubenswrapper[5119]: I0130 00:13:21.632747 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console/downloads-747b44746d-nts9m" Jan 30 00:13:21 crc kubenswrapper[5119]: I0130 00:13:21.632955 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:21 crc kubenswrapper[5119]: I0130 00:13:21.633006 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:22 crc kubenswrapper[5119]: E0130 00:13:22.406561 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb is running failed: container process not found" containerID="d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:22 crc kubenswrapper[5119]: E0130 00:13:22.407492 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb is running failed: container process not found" containerID="d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:22 crc kubenswrapper[5119]: E0130 00:13:22.407784 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb is running failed: container process not found" 
containerID="d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:22 crc kubenswrapper[5119]: E0130 00:13:22.407821 5119 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-29jhj" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="registry-server" probeResult="unknown" Jan 30 00:13:22 crc kubenswrapper[5119]: I0130 00:13:22.756047 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" path="/var/lib/kubelet/pods/868949ba-fadb-4cc1-88d4-503a4049f66d/volumes" Jan 30 00:13:23 crc kubenswrapper[5119]: E0130 00:13:23.565034 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6 is running failed: container process not found" containerID="7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:23 crc kubenswrapper[5119]: E0130 00:13:23.565970 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6 is running failed: container process not found" containerID="7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:23 crc kubenswrapper[5119]: E0130 00:13:23.566569 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6 is running failed: container process not found" containerID="7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:23 crc kubenswrapper[5119]: E0130 00:13:23.566669 5119 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-xmlxg" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="registry-server" probeResult="unknown" Jan 30 00:13:24 crc kubenswrapper[5119]: I0130 00:13:24.371101 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:13:24 crc kubenswrapper[5119]: I0130 00:13:24.371180 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:13:24 crc kubenswrapper[5119]: E0130 00:13:24.571554 5119 log.go:32] "ExecSync cmd from runtime service failed" 
err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43 is running failed: container process not found" containerID="c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:24 crc kubenswrapper[5119]: E0130 00:13:24.571842 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43 is running failed: container process not found" containerID="c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:24 crc kubenswrapper[5119]: E0130 00:13:24.572109 5119 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43 is running failed: container process not found" containerID="c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 00:13:24 crc kubenswrapper[5119]: E0130 00:13:24.572166 5119 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-vf8tm" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="registry-server" probeResult="unknown" Jan 30 00:13:24 crc kubenswrapper[5119]: I0130 00:13:24.951718 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29jhj" Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.009982 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-catalog-content\") pod \"f304f5b4-43b0-460e-b28c-f2af396db9a1\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.010087 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jnjj\" (UniqueName: \"kubernetes.io/projected/f304f5b4-43b0-460e-b28c-f2af396db9a1-kube-api-access-8jnjj\") pod \"f304f5b4-43b0-460e-b28c-f2af396db9a1\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.010129 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-utilities\") pod \"f304f5b4-43b0-460e-b28c-f2af396db9a1\" (UID: \"f304f5b4-43b0-460e-b28c-f2af396db9a1\") " Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.011932 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-utilities" (OuterVolumeSpecName: "utilities") pod "f304f5b4-43b0-460e-b28c-f2af396db9a1" (UID: "f304f5b4-43b0-460e-b28c-f2af396db9a1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.018073 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f304f5b4-43b0-460e-b28c-f2af396db9a1-kube-api-access-8jnjj" (OuterVolumeSpecName: "kube-api-access-8jnjj") pod "f304f5b4-43b0-460e-b28c-f2af396db9a1" (UID: "f304f5b4-43b0-460e-b28c-f2af396db9a1"). InnerVolumeSpecName "kube-api-access-8jnjj". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.111167 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8jnjj\" (UniqueName: \"kubernetes.io/projected/f304f5b4-43b0-460e-b28c-f2af396db9a1-kube-api-access-8jnjj\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.111204 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.907347 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f304f5b4-43b0-460e-b28c-f2af396db9a1" (UID: "f304f5b4-43b0-460e-b28c-f2af396db9a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:25 crc kubenswrapper[5119]: I0130 00:13:25.919749 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f304f5b4-43b0-460e-b28c-f2af396db9a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:26 crc kubenswrapper[5119]: I0130 00:13:26.372497 5119 generic.go:358] "Generic (PLEG): container finished" podID="af9d2341-a591-42b6-91e5-8177924152b8" containerID="c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43" exitCode=0 Jan 30 00:13:26 crc kubenswrapper[5119]: I0130 00:13:26.372590 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerDied","Data":"c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43"} Jan 30 00:13:26 crc kubenswrapper[5119]: I0130 00:13:26.373719 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:26 crc kubenswrapper[5119]: I0130 00:13:26.373764 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.379562 5119 generic.go:358] "Generic (PLEG): container finished" podID="31630771-ce1a-4418-89ce-c58d6bf3c61f" containerID="a54c15a37dfe3ba62cfd506d684379fd8c837b8f83f7249906f7a4cd352af63e" exitCode=0 Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.379665 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-tvgs8" 
event={"ID":"31630771-ce1a-4418-89ce-c58d6bf3c61f","Type":"ContainerDied","Data":"a54c15a37dfe3ba62cfd506d684379fd8c837b8f83f7249906f7a4cd352af63e"} Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.381741 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29jhj" Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.381754 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29jhj" event={"ID":"f304f5b4-43b0-460e-b28c-f2af396db9a1","Type":"ContainerDied","Data":"f1863e2f0ab0fc798af09d6b0fcce0062b74c51096b456624e38b207599a2da8"} Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.381820 5119 scope.go:117] "RemoveContainer" containerID="d2ac90d13d9ca229149fc109e5cee7120e3162a55b8b4a0a2b499dfdd4e90fdb" Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.404080 5119 scope.go:117] "RemoveContainer" containerID="4dddee7f49d725579f1eeae60e88826d7fa3a2c6be6656cb224285b2a8deee2a" Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.404078 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29jhj"] Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.406647 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-29jhj"] Jan 30 00:13:27 crc kubenswrapper[5119]: I0130 00:13:27.423683 5119 scope.go:117] "RemoveContainer" containerID="1213d75c9903cd7e76bd5b5f15b35bf3c4f7cff8a224710a4abe224f1021aaa9" Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.574192 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.663835 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4ws9\" (UniqueName: \"kubernetes.io/projected/31630771-ce1a-4418-89ce-c58d6bf3c61f-kube-api-access-f4ws9\") pod \"31630771-ce1a-4418-89ce-c58d6bf3c61f\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.663905 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/31630771-ce1a-4418-89ce-c58d6bf3c61f-serviceca\") pod \"31630771-ce1a-4418-89ce-c58d6bf3c61f\" (UID: \"31630771-ce1a-4418-89ce-c58d6bf3c61f\") " Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.664899 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31630771-ce1a-4418-89ce-c58d6bf3c61f-serviceca" (OuterVolumeSpecName: "serviceca") pod "31630771-ce1a-4418-89ce-c58d6bf3c61f" (UID: "31630771-ce1a-4418-89ce-c58d6bf3c61f"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.670106 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31630771-ce1a-4418-89ce-c58d6bf3c61f-kube-api-access-f4ws9" (OuterVolumeSpecName: "kube-api-access-f4ws9") pod "31630771-ce1a-4418-89ce-c58d6bf3c61f" (UID: "31630771-ce1a-4418-89ce-c58d6bf3c61f"). InnerVolumeSpecName "kube-api-access-f4ws9". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.756067 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" path="/var/lib/kubelet/pods/f304f5b4-43b0-460e-b28c-f2af396db9a1/volumes" Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.765329 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-f4ws9\" (UniqueName: \"kubernetes.io/projected/31630771-ce1a-4418-89ce-c58d6bf3c61f-kube-api-access-f4ws9\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:28 crc kubenswrapper[5119]: I0130 00:13:28.765367 5119 reconciler_common.go:299] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/31630771-ce1a-4418-89ce-c58d6bf3c61f-serviceca\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.397669 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-tvgs8" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.397684 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-tvgs8" event={"ID":"31630771-ce1a-4418-89ce-c58d6bf3c61f","Type":"ContainerDied","Data":"21984f541a7c8b0c4c6a3160111a903810522908fb79b27cfc59d522f3a61a97"} Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.398171 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21984f541a7c8b0c4c6a3160111a903810522908fb79b27cfc59d522f3a61a97" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.619832 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.676077 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-utilities\") pod \"af9d2341-a591-42b6-91e5-8177924152b8\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.676167 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-catalog-content\") pod \"af9d2341-a591-42b6-91e5-8177924152b8\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.676353 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggsl8\" (UniqueName: \"kubernetes.io/projected/af9d2341-a591-42b6-91e5-8177924152b8-kube-api-access-ggsl8\") pod \"af9d2341-a591-42b6-91e5-8177924152b8\" (UID: \"af9d2341-a591-42b6-91e5-8177924152b8\") " Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.677319 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-utilities" (OuterVolumeSpecName: "utilities") pod "af9d2341-a591-42b6-91e5-8177924152b8" (UID: "af9d2341-a591-42b6-91e5-8177924152b8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.680772 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af9d2341-a591-42b6-91e5-8177924152b8-kube-api-access-ggsl8" (OuterVolumeSpecName: "kube-api-access-ggsl8") pod "af9d2341-a591-42b6-91e5-8177924152b8" (UID: "af9d2341-a591-42b6-91e5-8177924152b8"). InnerVolumeSpecName "kube-api-access-ggsl8". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.778225 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ggsl8\" (UniqueName: \"kubernetes.io/projected/af9d2341-a591-42b6-91e5-8177924152b8-kube-api-access-ggsl8\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:29 crc kubenswrapper[5119]: I0130 00:13:29.778489 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.078410 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.184482 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-utilities\") pod \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.184522 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-catalog-content\") pod \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.184554 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dfwj\" (UniqueName: \"kubernetes.io/projected/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-kube-api-access-9dfwj\") pod \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\" (UID: \"c1343c5b-f89c-4044-9a5a-36db14bb5ed9\") " Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.185483 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-utilities" (OuterVolumeSpecName: "utilities") pod "c1343c5b-f89c-4044-9a5a-36db14bb5ed9" (UID: "c1343c5b-f89c-4044-9a5a-36db14bb5ed9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.188843 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-kube-api-access-9dfwj" (OuterVolumeSpecName: "kube-api-access-9dfwj") pod "c1343c5b-f89c-4044-9a5a-36db14bb5ed9" (UID: "c1343c5b-f89c-4044-9a5a-36db14bb5ed9"). InnerVolumeSpecName "kube-api-access-9dfwj". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.195443 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1343c5b-f89c-4044-9a5a-36db14bb5ed9" (UID: "c1343c5b-f89c-4044-9a5a-36db14bb5ed9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.297271 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.297602 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.297683 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9dfwj\" (UniqueName: \"kubernetes.io/projected/c1343c5b-f89c-4044-9a5a-36db14bb5ed9-kube-api-access-9dfwj\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.299306 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af9d2341-a591-42b6-91e5-8177924152b8" (UID: "af9d2341-a591-42b6-91e5-8177924152b8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.399020 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af9d2341-a591-42b6-91e5-8177924152b8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.402784 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmlxg" event={"ID":"c1343c5b-f89c-4044-9a5a-36db14bb5ed9","Type":"ContainerDied","Data":"4860794945e1db9efa4e7ff3ddc490b199845db3d60b0e11e1101786e63a48c3"} Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.402827 5119 scope.go:117] "RemoveContainer" containerID="7972b8424947509f52868fe43edcfffb5484a52f0e74819d0665190ed8f85ad6" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.402947 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmlxg" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.409735 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vf8tm" event={"ID":"af9d2341-a591-42b6-91e5-8177924152b8","Type":"ContainerDied","Data":"fd8adcfd8a3cd748066d7be919c84bb8da65c894e76de64001a1c4df00e4b901"} Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.409894 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vf8tm" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.422216 5119 scope.go:117] "RemoveContainer" containerID="2a5bff2313083f8acb1544588a14d492cd240afd60f9b29b255c427c2984b130" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.428534 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmlxg"] Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.432332 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmlxg"] Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.437856 5119 scope.go:117] "RemoveContainer" containerID="f98b615a1458df8bff7d5837015ac4754a0a3e3b52e5dfc8dc054bb305ebb3e8" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.451632 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vf8tm"] Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.453855 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vf8tm"] Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.468490 5119 scope.go:117] "RemoveContainer" containerID="c1181ace7c1f6d49f6256c63e04c96a670892d0f0194c6100c5717b4e3e1ac43" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.758488 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af9d2341-a591-42b6-91e5-8177924152b8" path="/var/lib/kubelet/pods/af9d2341-a591-42b6-91e5-8177924152b8/volumes" Jan 30 00:13:30 crc kubenswrapper[5119]: I0130 00:13:30.759616 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" path="/var/lib/kubelet/pods/c1343c5b-f89c-4044-9a5a-36db14bb5ed9/volumes" Jan 30 00:13:31 crc kubenswrapper[5119]: I0130 00:13:31.471287 5119 patch_prober.go:28] interesting pod/downloads-747b44746d-nts9m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 30 00:13:31 crc kubenswrapper[5119]: I0130 00:13:31.471347 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-nts9m" podUID="ff01653b-8f2a-47a1-ae0c-0ac878c25570" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.24:8080/\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 30 00:13:32 crc kubenswrapper[5119]: I0130 00:13:32.013657 5119 scope.go:117] "RemoveContainer" containerID="ebacac0d3bbc090ee3fd1802367960fb118415ac2bbcc63a00b06c42140e46f3" Jan 30 00:13:32 crc kubenswrapper[5119]: I0130 00:13:32.988313 5119 scope.go:117] "RemoveContainer" containerID="ae1f37d2126139d5c321717de3c7fca46efe21bba09cc3a90a417fad16f14157" Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004061 5119 kubelet.go:2537] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004647 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="extract-content" Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004664 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="extract-content" Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004675 5119 cpu_manager.go:401] "RemoveStaleState: 
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004681 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="d52cae6f-022b-494d-bc48-0dab7b961ad2" containerName="pruner"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004690 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004695 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004705 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="extract-content"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004710 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="extract-content"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004724 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004729 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004737 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004742 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004750 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="extract-content"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004754 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="extract-content"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004762 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004768 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004784 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004790 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004800 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004806 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004814 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="extract-content"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004820 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="extract-content"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004828 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004834 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004874 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004880 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="extract-utilities"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004891 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="31630771-ce1a-4418-89ce-c58d6bf3c61f" containerName="image-pruner"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004896 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="31630771-ce1a-4418-89ce-c58d6bf3c61f" containerName="image-pruner"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004905 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004910 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004984 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="32f760a1-1874-479d-810f-ac7785c7b94d" containerName="kube-multus-additional-cni-plugins"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.004993 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="c1343c5b-f89c-4044-9a5a-36db14bb5ed9" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.005003 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="f304f5b4-43b0-460e-b28c-f2af396db9a1" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.005013 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="d52cae6f-022b-494d-bc48-0dab7b961ad2" containerName="pruner"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.005020 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="af9d2341-a591-42b6-91e5-8177924152b8" containerName="registry-server"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.005027 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="31630771-ce1a-4418-89ce-c58d6bf3c61f" containerName="image-pruner"
Jan 30 00:13:33 crc kubenswrapper[5119]: I0130 00:13:33.005035 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="868949ba-fadb-4cc1-88d4-503a4049f66d" containerName="registry-server"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.086190 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093076 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-747b44746d-nts9m"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093112 5119 kubelet.go:2547] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093140 5119 kubelet.go:2537] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093807 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093834 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093847 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093857 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093877 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093884 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093892 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="setup"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093900 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="setup"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093925 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093932 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093949 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093955 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093968 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093976 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093985 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.093993 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094007 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094017 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094029 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094038 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094157 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094171 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094180 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094190 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094199 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094208 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094220 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094488 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.094505 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints"
Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.096445 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" containerID="cri-o://b34cbd9138535d7259b96939a704686f8e02efc79a8714278adb31a3c06fcd23" gracePeriod=15
podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" containerID="cri-o://b34cbd9138535d7259b96939a704686f8e02efc79a8714278adb31a3c06fcd23" gracePeriod=15 Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.096514 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3" gracePeriod=15 Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.096446 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" containerID="cri-o://71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42" gracePeriod=15 Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.096702 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer" containerID="cri-o://5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c" gracePeriod=15 Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.096760 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66" gracePeriod=15 Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.098788 5119 status_manager.go:905] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="3a14caf222afb62aaabdc47808b6f944" podUID="57755cc5f99000cc11e193051474d4e2" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.114705 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.215763 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.215899 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.215936 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.216054 5119 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.216205 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317254 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317357 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317452 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317472 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317551 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317687 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317720 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc 
kubenswrapper[5119]: I0130 00:13:37.317793 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317918 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.317876 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:37 crc kubenswrapper[5119]: I0130 00:13:37.432445 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.485575 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.486409 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.487091 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c" exitCode=2 Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.820167 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.820485 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f7dbc7e1ee9c187a863ef9b473fad27b","Type":"ContainerStarted","Data":"f81e610e5128c57da5c08f72e430398dc10039efb0625bd27d1ed6d4b3e4f976"} Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.924080 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.924197 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.924221 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.924278 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:38 crc kubenswrapper[5119]: I0130 00:13:38.924341 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025677 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025722 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025770 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025783 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025824 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025833 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.025846 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" 
Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.026100 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.026231 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.026358 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.494307 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.495763 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.496842 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="b34cbd9138535d7259b96939a704686f8e02efc79a8714278adb31a3c06fcd23" exitCode=0 Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.496862 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3" exitCode=0 Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.496868 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66" exitCode=0 Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.496913 5119 scope.go:117] "RemoveContainer" containerID="b9e63f15f9c159a5b06fc6e7edf4e12665c6f3ba21dae494c425111f618dcd09" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.498238 5119 generic.go:358] "Generic (PLEG): container finished" podID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" containerID="06af590206f86f3302d04d8b01c0ba33c17ec1ba40ceedaffe1bb9cf7fe275a1" exitCode=0 Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.498297 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"0f34372a-8c67-4ddf-85e2-14c85e52fc7e","Type":"ContainerDied","Data":"06af590206f86f3302d04d8b01c0ba33c17ec1ba40ceedaffe1bb9cf7fe275a1"} Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.510089 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:39 crc kubenswrapper[5119]: I0130 00:13:39.510613 5119 status_manager.go:895] "Failed 
to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.753057 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.753548 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.754382 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:40 crc kubenswrapper[5119]: E0130 00:13:40.850700 5119 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.198:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f59e582674b7f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f7dbc7e1ee9c187a863ef9b473fad27b,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container: startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:13:40.850084735 +0000 UTC m=+224.864147204,LastTimestamp:2026-01-30 00:13:40.850084735 +0000 UTC m=+224.864147204,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.856072 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0f34372a-8c67-4ddf-85e2-14c85e52fc7e" (UID: "0f34372a-8c67-4ddf-85e2-14c85e52fc7e"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.855947 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kubelet-dir\") pod \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.856707 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kube-api-access\") pod \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.857595 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-var-lock\") pod \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\" (UID: \"0f34372a-8c67-4ddf-85e2-14c85e52fc7e\") " Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.857743 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-var-lock" (OuterVolumeSpecName: "var-lock") pod "0f34372a-8c67-4ddf-85e2-14c85e52fc7e" (UID: "0f34372a-8c67-4ddf-85e2-14c85e52fc7e"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.857916 5119 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.858014 5119 reconciler_common.go:299] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-var-lock\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.873357 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0f34372a-8c67-4ddf-85e2-14c85e52fc7e" (UID: "0f34372a-8c67-4ddf-85e2-14c85e52fc7e"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:40 crc kubenswrapper[5119]: I0130 00:13:40.959553 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f34372a-8c67-4ddf-85e2-14c85e52fc7e-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.195337 5119 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.195615 5119 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.195915 5119 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.196922 5119 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.197232 5119 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.197342 5119 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.197691 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="200ms" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.398920 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="400ms" Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.509233 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"0f34372a-8c67-4ddf-85e2-14c85e52fc7e","Type":"ContainerDied","Data":"0ea66dbc4d001575c448ad97f196654ca28bcb790df7cd5025e9a152ab6d9d4c"} Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.509547 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ea66dbc4d001575c448ad97f196654ca28bcb790df7cd5025e9a152ab6d9d4c" Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.509337 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.510879 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f7dbc7e1ee9c187a863ef9b473fad27b","Type":"ContainerStarted","Data":"06499ae203d3f8243dc2906a22668ae0e5810d1e4aac7b26d7bbc43ce9907560"} Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.513452 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.514081 5119 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42" exitCode=0 Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.526457 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: I0130 00:13:41.526891 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:41 crc kubenswrapper[5119]: E0130 00:13:41.799835 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="800ms" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.519201 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.519913 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:42 crc kubenswrapper[5119]: E0130 00:13:42.600708 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="1.6s" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.736646 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.737991 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.738636 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.738995 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.739327 5119 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883195 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883272 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883382 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883413 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883438 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883454 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883462 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883639 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.883866 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir" (OuterVolumeSpecName: "ca-bundle-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "ca-bundle-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.884012 5119 reconciler_common.go:299] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.884025 5119 reconciler_common.go:299] "Volume detached for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.884034 5119 reconciler_common.go:299] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.884041 5119 reconciler_common.go:299] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.886585 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:42 crc kubenswrapper[5119]: I0130 00:13:42.985036 5119 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.528010 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.536532 5119 scope.go:117] "RemoveContainer" containerID="b34cbd9138535d7259b96939a704686f8e02efc79a8714278adb31a3c06fcd23" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.536727 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.537974 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.538637 5119 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.539810 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.551350 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.553219 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.553940 5119 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.555402 5119 scope.go:117] "RemoveContainer" containerID="3ec64836842604d724c5b3fc6e03787859f37bb6f3f2d868b57963814407dba3" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.570219 5119 scope.go:117] "RemoveContainer" containerID="8d35f4604e32d0d4804a3b34156ed8698a40a743f7ce10ed428780839daeab66" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.584795 5119 scope.go:117] "RemoveContainer" containerID="5cc2f86492ba54c66d9c6c1a9a34f75bf42fbaa9909b03d0311777b0c0a3795c" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.601588 5119 scope.go:117] "RemoveContainer" containerID="71ec6c2a4f2b4ceaf5bd2fe00c0dcc945915014237a6dfe3044ada4899a26c42" Jan 30 00:13:43 crc kubenswrapper[5119]: I0130 00:13:43.616758 5119 scope.go:117] "RemoveContainer" containerID="6cb744f13386a9d31a521fa5f1bf2bc1e5c867622a28725ebe5ec96adaed410d" Jan 30 00:13:44 crc kubenswrapper[5119]: E0130 00:13:44.201372 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="3.2s" Jan 30 00:13:44 crc kubenswrapper[5119]: I0130 00:13:44.758575 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a14caf222afb62aaabdc47808b6f944" path="/var/lib/kubelet/pods/3a14caf222afb62aaabdc47808b6f944/volumes" Jan 30 00:13:46 crc kubenswrapper[5119]: I0130 00:13:46.756218 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:46 crc kubenswrapper[5119]: I0130 00:13:46.757297 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:46 crc kubenswrapper[5119]: E0130 00:13:46.848760 5119 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.198:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f59e582674b7f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f7dbc7e1ee9c187a863ef9b473fad27b,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container: startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:13:40.850084735 +0000 UTC m=+224.864147204,LastTimestamp:2026-01-30 00:13:40.850084735 +0000 UTC m=+224.864147204,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:13:47 crc kubenswrapper[5119]: E0130 00:13:47.402238 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="6.4s" Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.587372 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.587757 5119 generic.go:358] "Generic (PLEG): container finished" podID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerID="b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd" exitCode=1 Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.587910 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerDied","Data":"b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd"} Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.588665 5119 scope.go:117] 
"RemoveContainer" containerID="b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd" Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.589202 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.589876 5119 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:51 crc kubenswrapper[5119]: I0130 00:13:51.590170 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 00:13:52.749135 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 00:13:52.750207 5119 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 00:13:52.750778 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 00:13:52.751171 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 00:13:52.771098 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 00:13:52.771148 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194" Jan 30 00:13:52 crc kubenswrapper[5119]: E0130 00:13:52.771733 5119 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:52 crc kubenswrapper[5119]: I0130 
00:13:52.771914 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:52 crc kubenswrapper[5119]: W0130 00:13:52.788441 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57755cc5f99000cc11e193051474d4e2.slice/crio-07c160ed71a423439350eaa237e018e67f506bef1aee7f9985eb69a61b0c9db1 WatchSource:0}: Error finding container 07c160ed71a423439350eaa237e018e67f506bef1aee7f9985eb69a61b0c9db1: Status 404 returned error can't find the container with id 07c160ed71a423439350eaa237e018e67f506bef1aee7f9985eb69a61b0c9db1 Jan 30 00:13:53 crc kubenswrapper[5119]: I0130 00:13:53.599859 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:13:53 crc kubenswrapper[5119]: I0130 00:13:53.600337 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"03d18ad9367f5163e32264788ea9c60e52765211f31052bd2850f0b727ba46e1"} Jan 30 00:13:53 crc kubenswrapper[5119]: I0130 00:13:53.601424 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"07c160ed71a423439350eaa237e018e67f506bef1aee7f9985eb69a61b0c9db1"} Jan 30 00:13:53 crc kubenswrapper[5119]: E0130 00:13:53.803339 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="7s" Jan 30 00:13:54 crc kubenswrapper[5119]: I0130 00:13:54.210992 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:54 crc kubenswrapper[5119]: I0130 00:13:54.370669 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:13:54 crc kubenswrapper[5119]: I0130 00:13:54.370733 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:13:55 crc kubenswrapper[5119]: I0130 00:13:55.611413 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:55 crc kubenswrapper[5119]: I0130 00:13:55.612071 5119 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:55 crc kubenswrapper[5119]: I0130 00:13:55.612344 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:56 crc kubenswrapper[5119]: I0130 00:13:56.618056 5119 generic.go:358] "Generic (PLEG): container finished" podID="57755cc5f99000cc11e193051474d4e2" containerID="59b4279dc345a59f59dfe341b9d2e44b2dc402ed21ccbe0e5cb1a56339379c82" exitCode=0 Jan 30 00:13:56 crc kubenswrapper[5119]: I0130 00:13:56.618197 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerDied","Data":"59b4279dc345a59f59dfe341b9d2e44b2dc402ed21ccbe0e5cb1a56339379c82"} Jan 30 00:13:56 crc kubenswrapper[5119]: I0130 00:13:56.758970 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:56 crc kubenswrapper[5119]: I0130 00:13:56.759932 5119 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:56 crc kubenswrapper[5119]: I0130 00:13:56.760260 5119 status_manager.go:895] "Failed to get status for pod" podUID="57755cc5f99000cc11e193051474d4e2" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:56 crc kubenswrapper[5119]: I0130 00:13:56.760625 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:56 crc kubenswrapper[5119]: E0130 00:13:56.850221 5119 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.198:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f59e582674b7f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f7dbc7e1ee9c187a863ef9b473fad27b,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container: startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 
00:13:40.850084735 +0000 UTC m=+224.864147204,LastTimestamp:2026-01-30 00:13:40.850084735 +0000 UTC m=+224.864147204,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:13:57 crc kubenswrapper[5119]: I0130 00:13:57.623608 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194" Jan 30 00:13:57 crc kubenswrapper[5119]: I0130 00:13:57.623640 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194" Jan 30 00:13:57 crc kubenswrapper[5119]: E0130 00:13:57.624048 5119 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:57 crc kubenswrapper[5119]: I0130 00:13:57.624064 5119 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:57 crc kubenswrapper[5119]: I0130 00:13:57.624501 5119 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:57 crc kubenswrapper[5119]: I0130 00:13:57.624810 5119 status_manager.go:895] "Failed to get status for pod" podUID="57755cc5f99000cc11e193051474d4e2" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:57 crc kubenswrapper[5119]: I0130 00:13:57.625071 5119 status_manager.go:895] "Failed to get status for pod" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.198:6443: connect: connection refused" Jan 30 00:13:58 crc kubenswrapper[5119]: I0130 00:13:58.069376 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:58 crc kubenswrapper[5119]: I0130 00:13:58.069664 5119 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 30 00:13:58 crc kubenswrapper[5119]: I0130 00:13:58.069714 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": 
dial tcp 192.168.126.11:10257: connect: connection refused" Jan 30 00:13:58 crc kubenswrapper[5119]: I0130 00:13:58.865673 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:14:00 crc kubenswrapper[5119]: E0130 00:14:00.804702 5119 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.198:6443: connect: connection refused" interval="7s" Jan 30 00:14:01 crc kubenswrapper[5119]: I0130 00:14:01.656207 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"9a97ffb9030eeda63b09da961cb042c27f609c4cf1560ebf6fd916610f3c533a"} Jan 30 00:14:03 crc kubenswrapper[5119]: I0130 00:14:03.668374 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"a90b21fdb4e3a10617d3d886af0780550ece763142d8ba36974141766762f5fa"} Jan 30 00:14:05 crc kubenswrapper[5119]: I0130 00:14:05.681261 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"0dd0c3aaba6387887b4eec4a6a3f39e25eefe548ec33deee200abd935bb51235"} Jan 30 00:14:08 crc kubenswrapper[5119]: I0130 00:14:08.069474 5119 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 30 00:14:08 crc kubenswrapper[5119]: I0130 00:14:08.069969 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 30 00:14:08 crc kubenswrapper[5119]: I0130 00:14:08.700816 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"23563decd2e3bcc4e021efae815938b6a3c29e9236babd7b67828e60faa91694"} Jan 30 00:14:10 crc kubenswrapper[5119]: I0130 00:14:10.715913 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"272b495839b5ffa333fbe88f28c2530b61c9b94d792d62ae6dd7c98fc4a1ced9"} Jan 30 00:14:11 crc kubenswrapper[5119]: I0130 00:14:11.723459 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:14:11 crc kubenswrapper[5119]: I0130 00:14:11.723518 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194" Jan 30 00:14:11 crc kubenswrapper[5119]: I0130 00:14:11.724500 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194" 
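
With the API server at api-int.crc.testing:6443 refusing connections throughout this window, the node-lease controller's retry interval can be seen doubling in the errors above: 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s, then pinned at 7s. Below is a minimal sketch of that capped doubling; the 7s ceiling is read off these log lines rather than taken from the kubelet source, and nextRetry/maxInterval are names chosen here for illustration only.

    package main

    import (
        "fmt"
        "time"
    )

    // nextRetry doubles the retry interval and clamps it at the ceiling,
    // reproducing the interval= progression in the lease-controller errors.
    func nextRetry(cur, ceiling time.Duration) time.Duration {
        if next := cur * 2; next < ceiling {
            return next
        }
        return ceiling
    }

    func main() {
        const maxInterval = 7 * time.Second // ceiling observed in the log
        interval := 200 * time.Millisecond  // first "will retry" interval
        for attempt := 1; attempt <= 8; attempt++ {
            fmt.Printf("attempt %d: will retry in %v\n", attempt, interval)
            interval = nextRetry(interval, maxInterval)
        }
        // Prints 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s, 7s, 7s, the same
        // sequence as the "Failed to ensure lease exists" lines above.
    }

The same refused connections account for the rest of the noise in this stretch: pod status GETs, event POSTs, and mirror-pod DELETEs all fail with "dial tcp 38.102.83.198:6443: connect: connection refused" until the replacement kube-apiserver-crc containers start and the pod's readiness probe reports ready further down.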
Jan 30 00:14:11 crc kubenswrapper[5119]: I0130 00:14:11.740228 5119 kubelet.go:3329] "Deleted mirror pod as it didn't match the static Pod" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:11 crc kubenswrapper[5119]: I0130 00:14:11.740714 5119 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:11 crc kubenswrapper[5119]: I0130 00:14:11.769965 5119 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00289c64-f714-4516-af94-db01b82df194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://9a97ffb9030eeda63b09da961cb042c27f609c4cf1560ebf6fd916610f3c533a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:14:00Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://0dd0c3aaba6387887b4eec4a6a3f39e25eefe548ec33deee200abd935bb51235\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:14:04Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://a90b21fdb4e3a10617d3d886af0780550ece763142d8ba36974141766762f5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:14:02Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://272b495839b5ffa333fbe88f28c2530b61c9b94d792d62ae6dd7c98fc4a1ced9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:14:10Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://23563decd2e3bcc4e021efae815938b6a3c29e9236babd7b67828e60faa91694\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:14:08Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Pod \"kube-apiserver-crc\" is invalid: metadata.uid: Invalid value: \"00289c64-f714-4516-af94-db01b82df194\": field is immutable"
Jan 30 00:14:12 crc kubenswrapper[5119]: I0130 00:14:12.728909 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:12 crc kubenswrapper[5119]: I0130 00:14:12.729273 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:12 crc kubenswrapper[5119]: I0130 00:14:12.772879 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:12 crc kubenswrapper[5119]: I0130 00:14:12.772950 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:12 crc kubenswrapper[5119]: I0130 00:14:12.777200 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:12 crc kubenswrapper[5119]: I0130 00:14:12.779876 5119 status_manager.go:905] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="57755cc5f99000cc11e193051474d4e2" podUID="6269a5c3-f945-4919-a83e-efc2528cf01e"
Jan 30 00:14:13 crc kubenswrapper[5119]: I0130 00:14:13.736438 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:13 crc kubenswrapper[5119]: I0130 00:14:13.736490 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:14 crc kubenswrapper[5119]: I0130 00:14:14.740768 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:14 crc kubenswrapper[5119]: I0130 00:14:14.741065 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:14 crc kubenswrapper[5119]: I0130 00:14:14.746208 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:15 crc kubenswrapper[5119]: I0130 00:14:15.744860 5119 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:15 crc kubenswrapper[5119]: I0130 00:14:15.744894 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="00289c64-f714-4516-af94-db01b82df194"
Jan 30 00:14:16 crc kubenswrapper[5119]: I0130 00:14:16.769839 5119 status_manager.go:905] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="57755cc5f99000cc11e193051474d4e2" podUID="6269a5c3-f945-4919-a83e-efc2528cf01e"
Jan 30 00:14:18 crc kubenswrapper[5119]: I0130 00:14:18.069050 5119 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Jan 30 00:14:18 crc kubenswrapper[5119]: I0130 00:14:18.069165 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Jan 30 00:14:18 crc kubenswrapper[5119]: I0130 00:14:18.069252 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:14:18 crc kubenswrapper[5119]: I0130 00:14:18.070247 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"03d18ad9367f5163e32264788ea9c60e52765211f31052bd2850f0b727ba46e1"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted"
Jan 30 00:14:18 crc kubenswrapper[5119]: I0130 00:14:18.070412 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="kube-controller-manager" containerID="cri-o://03d18ad9367f5163e32264788ea9c60e52765211f31052bd2850f0b727ba46e1" gracePeriod=30
Jan 30 00:14:20 crc kubenswrapper[5119]: I0130 00:14:20.041637 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-config\""
Jan 30 00:14:20 crc kubenswrapper[5119]: I0130 00:14:20.112584 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:20 crc kubenswrapper[5119]: I0130 00:14:20.147690 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\""
Jan 30 00:14:20 crc kubenswrapper[5119]: I0130 00:14:20.996065 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:21 crc kubenswrapper[5119]: I0130 00:14:21.324010 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-service-ca-bundle\""
Jan 30 00:14:21 crc kubenswrapper[5119]: I0130 00:14:21.945655 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"node-resolver-dockercfg-tk7bt\""
Jan 30 00:14:23 crc kubenswrapper[5119]: I0130 00:14:23.910162 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\""
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.212444 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"samples-operator-tls\""
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.371013 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.371090 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.371138 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd"
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.371644 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4ec9f325eee2102e27ce2c2c8fd3570bc6b933200f4125272f5d5dc6a4741502"} pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.371701 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" containerID="cri-o://4ec9f325eee2102e27ce2c2c8fd3570bc6b933200f4125272f5d5dc6a4741502" gracePeriod=600
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.790980 5119 generic.go:358] "Generic (PLEG): container finished" podID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerID="4ec9f325eee2102e27ce2c2c8fd3570bc6b933200f4125272f5d5dc6a4741502" exitCode=0
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.791111 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerDied","Data":"4ec9f325eee2102e27ce2c2c8fd3570bc6b933200f4125272f5d5dc6a4741502"}
Jan 30 00:14:24 crc kubenswrapper[5119]: I0130 00:14:24.791685 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"2293b955c1384c8479b87ad35b70303afcaed7f3f538d92a41b347eff7768adf"}
Jan 30 00:14:25 crc kubenswrapper[5119]: I0130 00:14:25.048313 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:26 crc kubenswrapper[5119]: I0130 00:14:26.572228 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:27 crc kubenswrapper[5119]: I0130 00:14:27.460666 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:28 crc kubenswrapper[5119]: I0130 00:14:28.932350 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"node-ca-dockercfg-tjs74\""
Jan 30 00:14:29 crc kubenswrapper[5119]: I0130 00:14:29.099113 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-root-ca.crt\""
Jan 30 00:14:29 crc kubenswrapper[5119]: I0130 00:14:29.233211 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"serving-cert\""
Jan 30 00:14:29 crc kubenswrapper[5119]: I0130 00:14:29.415211 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"node-bootstrapper-token\""
Jan 30 00:14:30 crc kubenswrapper[5119]: I0130 00:14:30.235248 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:31 crc kubenswrapper[5119]: I0130 00:14:31.089942 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:34 crc kubenswrapper[5119]: I0130 00:14:34.855709 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-default-metrics-tls\""
Jan 30 00:14:35 crc kubenswrapper[5119]: I0130 00:14:35.566331 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"env-overrides\""
Jan 30 00:14:37 crc kubenswrapper[5119]: I0130 00:14:37.749437 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"openshift-service-ca.crt\""
Jan 30 00:14:38 crc kubenswrapper[5119]: I0130 00:14:38.073584 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\""
Jan 30 00:14:38 crc kubenswrapper[5119]: I0130 00:14:38.425687 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-rbac-proxy\""
Jan 30 00:14:38 crc kubenswrapper[5119]: I0130 00:14:38.550941 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-config\""
Jan 30 00:14:39 crc kubenswrapper[5119]: I0130 00:14:39.410466 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-config\""
Jan 30 00:14:40 crc kubenswrapper[5119]: I0130 00:14:40.188139 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-metrics-certs-default\""
Jan 30 00:14:40 crc kubenswrapper[5119]: I0130 00:14:40.201990 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"service-ca-bundle\""
Jan 30 00:14:40 crc kubenswrapper[5119]: I0130 00:14:40.517364 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-tls\""
Jan 30 00:14:41 crc kubenswrapper[5119]: I0130 00:14:41.163450 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\""
Jan 30 00:14:41 crc kubenswrapper[5119]: I0130 00:14:41.932980 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-config\""
Jan 30 00:14:41 crc kubenswrapper[5119]: I0130 00:14:41.965916 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-images\""
Jan 30 00:14:42 crc kubenswrapper[5119]: I0130 00:14:42.422536 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:42 crc kubenswrapper[5119]: I0130 00:14:42.539680 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-4zqgh\""
Jan 30 00:14:42 crc kubenswrapper[5119]: I0130 00:14:42.713036 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:43 crc kubenswrapper[5119]: I0130 00:14:43.570220 5119 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:14:43 crc kubenswrapper[5119]: I0130 00:14:43.821971 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\""
Jan 30 00:14:43 crc kubenswrapper[5119]: I0130 00:14:43.957776 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"audit\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.021449 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.194094 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"packageserver-service-cert\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.270736 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"pprof-cert\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.290255 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serviceaccount-dockercfg-4gqzj\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.295719 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-service-ca\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.318764 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"openshift-service-ca.crt\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.329216 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.536099 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-node-dockercfg-l2v2m\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.544596 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"kube-root-ca.crt\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.678575 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"community-operators-dockercfg-vrd5f\""
Jan 30 00:14:44 crc kubenswrapper[5119]: I0130 00:14:44.945218 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"openshift-service-ca.crt\""
Jan 30 00:14:45 crc kubenswrapper[5119]: I0130 00:14:45.032044 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-dockercfg-4vdnc\""
Jan 30 00:14:45 crc kubenswrapper[5119]: I0130 00:14:45.168583 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-dockercfg-8dkm8\""
Jan 30 00:14:45 crc kubenswrapper[5119]: I0130 00:14:45.217651 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-dockercfg-bjqfd\""
Jan 30 00:14:45 crc kubenswrapper[5119]: I0130 00:14:45.291348 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-6n5ln\""
Jan 30 00:14:45 crc kubenswrapper[5119]: I0130 00:14:45.529056 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-dockercfg-dzw6b\""
Jan 30 00:14:45 crc kubenswrapper[5119]: I0130 00:14:45.978827 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"oauth-serving-cert\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.102467 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"openshift-service-ca.crt\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.202973 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-root-ca.crt\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.238891 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"marketplace-trusted-ca\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.274156 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"signing-key\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.475161 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-secret\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.552086 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-config\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.703448 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-client\""
Jan 30 00:14:46 crc kubenswrapper[5119]: I0130 00:14:46.960492 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-dockercfg-bf7fj\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.216913 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"proxy-tls\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.266823 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"ingress-operator-dockercfg-74nwh\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.267024 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"registry-dockercfg-6w67b\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.298686 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"kube-root-ca.crt\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.577581 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-router-certs\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.662188 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"trusted-ca\""
Jan 30 00:14:47 crc kubenswrapper[5119]: I0130 00:14:47.747332 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"service-ca\""
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.307574 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"openshift-service-ca.crt\""
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.333005 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ac-dockercfg-gj7jx\""
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.561384 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"etcd-client\""
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.700995 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"iptables-alerter-script\""
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.826448 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.950017 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log"
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.951839 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log"
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.951885 5119 generic.go:358] "Generic (PLEG): container finished" podID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerID="03d18ad9367f5163e32264788ea9c60e52765211f31052bd2850f0b727ba46e1" exitCode=137
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.952225 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerDied","Data":"03d18ad9367f5163e32264788ea9c60e52765211f31052bd2850f0b727ba46e1"}
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.952264 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"78b223c89d86e4d56c997d7a68694ea3a7111128bf2feacac7a1495e4d000703"}
Jan 30 00:14:48 crc kubenswrapper[5119]: I0130 00:14:48.952285 5119 scope.go:117] "RemoveContainer" containerID="b1318090dd33aa89fd1dbebbe525d86fe9687c107990e001fccdb19bff19c2fd"
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.026362 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.120636 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"openshift-service-ca.crt\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.178934 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-script-lib\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.359073 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.422828 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.484847 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-config\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.658986 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"client-ca\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.707822 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-error\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.742256 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"openshift-service-ca.crt\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.759676 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"signing-cabundle\""
Jan 30 00:14:49 crc kubenswrapper[5119]: I0130 00:14:49.958311 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log"
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.070855 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"trusted-ca-bundle\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.164450 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"multus-daemon-config\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.197350 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-client\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.338788 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-operator-tls\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.369382 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.370761 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"service-ca-bundle\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.416324 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-certs-default\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.510720 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-metrics\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.579038 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"cluster-version-operator-serving-cert\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.750938 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-dockercfg-2wbn2\""
Jan 30 00:14:50 crc kubenswrapper[5119]: I0130 00:14:50.979156 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-config\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.085352 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.304172 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"kube-root-ca.crt\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.517177 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-copy-resources\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.769804 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"trusted-ca-bundle\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.836565 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-controller-dockercfg-xnj77\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.848892 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-7cl8d\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.901606 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"kube-root-ca.crt\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.959243 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:51 crc kubenswrapper[5119]: I0130 00:14:51.989067 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-console\"/\"networking-console-plugin-cert\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.016756 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.134327 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.137072 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"trusted-ca\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.556130 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"image-registry-certificates\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.595320 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.598188 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.694939 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"serving-cert\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.853866 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"metrics-tls\""
Jan 30 00:14:52 crc kubenswrapper[5119]: I0130 00:14:52.903801 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"default-dockercfg-hqpm5\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.036603 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.105035 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.248194 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-tls\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.288258 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"etcd-serving-ca\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.443797 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"console-operator-dockercfg-kl6m8\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.513960 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"openshift-service-ca.crt\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.560252 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"kube-root-ca.crt\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.611478 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-dockercfg-jcmfj\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.673874 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-config\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.729541 5119 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.830998 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"oauth-apiserver-sa-dockercfg-qqw4z\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.919138 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"openshift-service-ca.crt\""
Jan 30 00:14:53 crc kubenswrapper[5119]: I0130 00:14:53.942464 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:54 crc kubenswrapper[5119]: I0130 00:14:54.018889 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-w9nzh\""
Jan 30 00:14:54 crc kubenswrapper[5119]: I0130 00:14:54.145471 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"kube-root-ca.crt\""
Jan 30 00:14:54 crc kubenswrapper[5119]: I0130 00:14:54.678028 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\""
Jan 30 00:14:54 crc kubenswrapper[5119]: I0130 00:14:54.728797 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-dockercfg-kw8fx\""
Jan 30 00:14:54 crc kubenswrapper[5119]: I0130 00:14:54.729317 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"dns-operator-dockercfg-wbbsn\""
Jan 30 00:14:54 crc kubenswrapper[5119]: I0130 00:14:54.770627 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.076891 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"cluster-samples-operator-dockercfg-jmhxf\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.103088 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-node-metrics-cert\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.141586 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"openshift-service-ca.crt\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.443607 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"kube-root-ca.crt\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.539173 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-session\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.750789 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-stats-default\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.768790 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"serving-cert\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.847570 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"kube-root-ca.crt\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.919663 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:55 crc kubenswrapper[5119]: I0130 00:14:55.974510 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-node-identity\"/\"network-node-identity-cert\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.405223 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"openshift-service-ca.crt\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.413431 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-dockercfg-gnx66\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.514571 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"canary-serving-cert\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.527969 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"openshift-service-ca.crt\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.604664 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"hostpath-provisioner\"/\"csi-hostpath-provisioner-sa-dockercfg-7dcws\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.646544 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-storage-version-migrator-sa-dockercfg-kknhg\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.746790 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"serving-cert\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.834317 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-serving-cert\""
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.903187 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log"
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.903456 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log"
Jan 30 00:14:56 crc kubenswrapper[5119]: I0130 00:14:56.961122 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-ca-bundle\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.055973 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.093269 5119 ???:1] "http: TLS handshake error from 192.168.126.11:49762: no serving certificate available for the kubelet"
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.187743 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"image-import-ca\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.214522 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.337661 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"openshift-service-ca.crt\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.384683 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mcc-proxy-tls\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.805654 5119 reflector.go:430] "Caches populated" logger="kubernetes.io/kubelet-serving" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162"
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.813052 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"kube-root-ca.crt\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.821572 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-rbac-proxy\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.831600 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-serving-cert\""
Jan 30 00:14:57 crc kubenswrapper[5119]: I0130 00:14:57.858674 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"encryption-config-1\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.036605 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"installation-pull-secrets\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.068009 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-root-ca.crt\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.069205 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.077452 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.128777 5119 ???:1] "http: TLS handshake error from 192.168.126.11:49776: no serving certificate available for the kubelet"
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.192542 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-6c46w\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.259766 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-9gxlh\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.383841 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-oauth-config\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.396987 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.683570 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"openshift-service-ca.crt\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.789871 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"kube-root-ca.crt\""
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.865794 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:14:58 crc kubenswrapper[5119]: I0130 00:14:58.873291 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.119109 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.125429 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"kube-root-ca.crt\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.221049 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-console\"/\"networking-console-plugin\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.325960 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"package-server-manager-serving-cert\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.460836 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"metrics-tls\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.480111 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"openshift-service-ca.crt\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.565567 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"kube-root-ca.crt\""
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.840929 5119 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="pkg/kubelet/config/apiserver.go:66"
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.845445 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=82.845418373 podStartE2EDuration="1m22.845418373s" podCreationTimestamp="2026-01-30 00:13:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:06.775025132 +0000 UTC m=+250.789087611" watchObservedRunningTime="2026-01-30 00:14:59.845418373 +0000 UTC m=+303.859480872"
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.848577 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.848646 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.856812 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.881883 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=48.881853333 podStartE2EDuration="48.881853333s" podCreationTimestamp="2026-01-30 00:14:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:59.879240529 +0000 UTC m=+303.893302988" watchObservedRunningTime="2026-01-30 00:14:59.881853333 +0000 UTC m=+303.895915842"
Jan 30 00:14:59 crc kubenswrapper[5119]: I0130 00:14:59.937607 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-dockercfg-tnfx9\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.094026 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"kube-root-ca.crt\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.215241 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"default-dockercfg-9pgs7\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.304019 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"whereabouts-flatfile-config\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.749182 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-ntnd7\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.800982 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"console-config\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.852069 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-nwglk\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.937341 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"client-ca\""
Jan 30 00:15:00 crc kubenswrapper[5119]: I0130 00:15:00.968853 5119 reflector.go:430] "Caches populated" type="*v1.RuntimeClass" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.012687 5119 generic.go:358] "Generic (PLEG): container finished" podID="e627abc4-228d-4133-8f48-393e979d9826" containerID="cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4" exitCode=0
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.012803 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" event={"ID":"e627abc4-228d-4133-8f48-393e979d9826","Type":"ContainerDied","Data":"cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"}
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.013363 5119 scope.go:117] "RemoveContainer" containerID="cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.015703 5119 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.125078 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-images\""
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.601517 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-6tbpn\""
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.675976 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\""
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.698962 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"kube-root-ca.crt\""
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.806859 5119 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:15:01 crc kubenswrapper[5119]: I0130 00:15:01.846190 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"kube-root-ca.crt\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.000885 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"serving-cert\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.019583 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" event={"ID":"e627abc4-228d-4133-8f48-393e979d9826","Type":"ContainerStarted","Data":"7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d"}
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.020081 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.021021 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98"
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.041185 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"console-operator-config\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.156219 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-sjn6s\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.284779 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-dockercfg-kpvmz\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.329485 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-operator\"/\"metrics-tls\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.469740 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-tls\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.490266 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"audit-1\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.645313 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"trusted-ca\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.854815 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"audit-1\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.856933 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"env-overrides\""
Jan 30 00:15:02 crc kubenswrapper[5119]: I0130 00:15:02.935134 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-global-ca\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.038808 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"config-operator-serving-cert\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.409875 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"dns-default\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.458666 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-tls\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.536486 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"kube-root-ca.crt\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.548914 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"encryption-config-1\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.651063 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"config\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.697377 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"default-dockercfg-mdwwj\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.770875 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.808855 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"config\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.882877 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\""
Jan 30 00:15:03 crc kubenswrapper[5119]: I0130 00:15:03.965117 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-dockercfg-2h6bs\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.259198 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-dockercfg-2cfkp\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.321042 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.394952 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"config\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.409565 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-serving-cert\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.473375 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"trusted-ca-bundle\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.484678 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-dockercfg-sw6nc\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.569561 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"ovnkube-identity-cm\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.578818 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"kube-root-ca.crt\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.651176 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"]
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.651905 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" containerName="installer"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.651930 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" containerName="installer"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.652022 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="0f34372a-8c67-4ddf-85e2-14c85e52fc7e" containerName="installer"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.658947 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.661712 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.661944 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\""
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.662038 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"]
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.760869 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-config-volume\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.760933 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-secret-volume\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.760977 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfd9c\" (UniqueName: \"kubernetes.io/projected/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-kube-api-access-zfd9c\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.862580 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-secret-volume\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.862644 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zfd9c\" (UniqueName: \"kubernetes.io/projected/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-kube-api-access-zfd9c\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.862706 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-config-volume\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"
Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.863644 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName:
\"kubernetes.io/configmap/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-config-volume\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.869413 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-secret-volume\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.892666 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfd9c\" (UniqueName: \"kubernetes.io/projected/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-kube-api-access-zfd9c\") pod \"collect-profiles-29495535-j2kkm\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.904788 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-login\"" Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.918813 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.971794 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"serving-cert\"" Jan 30 00:15:04 crc kubenswrapper[5119]: I0130 00:15:04.978746 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" Jan 30 00:15:05 crc kubenswrapper[5119]: I0130 00:15:05.439138 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"service-ca-dockercfg-bgxvm\"" Jan 30 00:15:05 crc kubenswrapper[5119]: I0130 00:15:05.610297 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-admission-controller-secret\"" Jan 30 00:15:05 crc kubenswrapper[5119]: I0130 00:15:05.874615 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-serving-ca\"" Jan 30 00:15:05 crc kubenswrapper[5119]: I0130 00:15:05.919528 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"config\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.272183 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"openshift-service-ca.crt\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.394546 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-djmfg\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.833299 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-tls\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.858870 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"oauth-openshift-dockercfg-d2bf2\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.881299 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-mmcpt\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.976192 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"serving-cert\"" Jan 30 00:15:06 crc kubenswrapper[5119]: I0130 00:15:06.997600 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"serving-cert\"" Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.033871 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm"] Jan 30 00:15:07 crc kubenswrapper[5119]: W0130 00:15:07.047397 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e650ae1_e2c6_4bd9_9399_2ef77e811f63.slice/crio-ac6740d73fcd2cebdcf5efb8bf5b8aa366e106ab4ac6489c74f218c096e29e24 WatchSource:0}: Error finding container ac6740d73fcd2cebdcf5efb8bf5b8aa366e106ab4ac6489c74f218c096e29e24: Status 404 returned error can't find the container with id ac6740d73fcd2cebdcf5efb8bf5b8aa366e106ab4ac6489c74f218c096e29e24 Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.061031 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" event={"ID":"4e650ae1-e2c6-4bd9-9399-2ef77e811f63","Type":"ContainerStarted","Data":"ac6740d73fcd2cebdcf5efb8bf5b8aa366e106ab4ac6489c74f218c096e29e24"} Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.391969 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" 
reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-config\"" Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.537926 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serving-cert\"" Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.665220 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"default-dockercfg-g6kgg\"" Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.747486 5119 kubelet.go:2547] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.747738 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" containerID="cri-o://06499ae203d3f8243dc2906a22668ae0e5810d1e4aac7b26d7bbc43ce9907560" gracePeriod=5 Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.813806 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-sa-dockercfg-wzhvk\"" Jan 30 00:15:07 crc kubenswrapper[5119]: I0130 00:15:07.948220 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\"" Jan 30 00:15:08 crc kubenswrapper[5119]: I0130 00:15:08.037029 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-serving-cert\"" Jan 30 00:15:08 crc kubenswrapper[5119]: I0130 00:15:08.071928 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" event={"ID":"4e650ae1-e2c6-4bd9-9399-2ef77e811f63","Type":"ContainerDied","Data":"7a14c86711d2bb29ce3a8bc10e53ba6aa965b2345bf072a8f0a1bf8dc03c7a31"} Jan 30 00:15:08 crc kubenswrapper[5119]: I0130 00:15:08.072719 5119 generic.go:358] "Generic (PLEG): container finished" podID="4e650ae1-e2c6-4bd9-9399-2ef77e811f63" containerID="7a14c86711d2bb29ce3a8bc10e53ba6aa965b2345bf072a8f0a1bf8dc03c7a31" exitCode=0 Jan 30 00:15:08 crc kubenswrapper[5119]: I0130 00:15:08.107555 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"kube-root-ca.crt\"" Jan 30 00:15:08 crc kubenswrapper[5119]: I0130 00:15:08.859181 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-sa-dockercfg-t8n29\"" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.312075 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.421528 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-secret-volume\") pod \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.421601 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfd9c\" (UniqueName: \"kubernetes.io/projected/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-kube-api-access-zfd9c\") pod \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.421725 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-config-volume\") pod \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\" (UID: \"4e650ae1-e2c6-4bd9-9399-2ef77e811f63\") " Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.423593 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-config-volume" (OuterVolumeSpecName: "config-volume") pod "4e650ae1-e2c6-4bd9-9399-2ef77e811f63" (UID: "4e650ae1-e2c6-4bd9-9399-2ef77e811f63"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.431865 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4e650ae1-e2c6-4bd9-9399-2ef77e811f63" (UID: "4e650ae1-e2c6-4bd9-9399-2ef77e811f63"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.432583 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-kube-api-access-zfd9c" (OuterVolumeSpecName: "kube-api-access-zfd9c") pod "4e650ae1-e2c6-4bd9-9399-2ef77e811f63" (UID: "4e650ae1-e2c6-4bd9-9399-2ef77e811f63"). InnerVolumeSpecName "kube-api-access-zfd9c". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.523261 5119 reconciler_common.go:299] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.523316 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zfd9c\" (UniqueName: \"kubernetes.io/projected/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-kube-api-access-zfd9c\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.523334 5119 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e650ae1-e2c6-4bd9-9399-2ef77e811f63-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:09 crc kubenswrapper[5119]: I0130 00:15:09.812776 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-scheduler-operator-serving-cert\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:09.999989 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"openshift-service-ca.crt\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.019528 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.054169 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"catalog-operator-serving-cert\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.085674 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.085780 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-j2kkm" event={"ID":"4e650ae1-e2c6-4bd9-9399-2ef77e811f63","Type":"ContainerDied","Data":"ac6740d73fcd2cebdcf5efb8bf5b8aa366e106ab4ac6489c74f218c096e29e24"} Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.085849 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac6740d73fcd2cebdcf5efb8bf5b8aa366e106ab4ac6489c74f218c096e29e24" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.085893 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"kube-root-ca.crt\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.105846 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mco-proxy-tls\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.130477 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-nl8tp\"" Jan 30 00:15:10 crc kubenswrapper[5119]: I0130 00:15:10.377331 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"kube-root-ca.crt\"" Jan 30 00:15:11 crc kubenswrapper[5119]: I0130 00:15:11.114290 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-serving-cert\"" Jan 30 00:15:11 crc kubenswrapper[5119]: I0130 00:15:11.889071 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-marketplace-dockercfg-gg4w7\"" Jan 30 00:15:12 crc kubenswrapper[5119]: I0130 00:15:12.556587 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-serving-cert\"" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.105666 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f7dbc7e1ee9c187a863ef9b473fad27b/startup-monitor/0.log" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.105736 5119 generic.go:358] "Generic (PLEG): container finished" podID="f7dbc7e1ee9c187a863ef9b473fad27b" containerID="06499ae203d3f8243dc2906a22668ae0e5810d1e4aac7b26d7bbc43ce9907560" exitCode=137 Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.357769 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f7dbc7e1ee9c187a863ef9b473fad27b/startup-monitor/0.log" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.357838 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477068 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477206 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477315 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477347 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log" (OuterVolumeSpecName: "var-log") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477359 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477431 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests" (OuterVolumeSpecName: "manifests") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477528 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477647 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock" (OuterVolumeSpecName: "var-lock") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.477690 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.478137 5119 reconciler_common.go:299] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.478159 5119 reconciler_common.go:299] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.478175 5119 reconciler_common.go:299] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.478188 5119 reconciler_common.go:299] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.488816 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:15:13 crc kubenswrapper[5119]: I0130 00:15:13.579321 5119 reconciler_common.go:299] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.115506 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f7dbc7e1ee9c187a863ef9b473fad27b/startup-monitor/0.log" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.115653 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.115695 5119 scope.go:117] "RemoveContainer" containerID="06499ae203d3f8243dc2906a22668ae0e5810d1e4aac7b26d7bbc43ce9907560" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.757356 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" path="/var/lib/kubelet/pods/f7dbc7e1ee9c187a863ef9b473fad27b/volumes" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.758253 5119 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.773973 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.774023 5119 kubelet.go:2759] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ad462daa-9d20-4ea7-9201-a5684b9dc3e1" Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.782465 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:15:14 crc kubenswrapper[5119]: I0130 00:15:14.782536 5119 kubelet.go:2784] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ad462daa-9d20-4ea7-9201-a5684b9dc3e1" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.384317 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"] Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.385085 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" podUID="8168819b-6cca-4680-a37d-ade6172d7778" containerName="controller-manager" containerID="cri-o://ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505" gracePeriod=30 Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.389964 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"] Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.390460 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerName="route-controller-manager" containerID="cri-o://cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253" gracePeriod=30 Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.762565 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.772777 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.804987 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-555c55766b-jdqhk"] Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806034 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="4e650ae1-e2c6-4bd9-9399-2ef77e811f63" containerName="collect-profiles" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806062 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e650ae1-e2c6-4bd9-9399-2ef77e811f63" containerName="collect-profiles" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806078 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806086 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806109 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerName="route-controller-manager" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806121 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerName="route-controller-manager" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806133 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8168819b-6cca-4680-a37d-ade6172d7778" containerName="controller-manager" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806141 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="8168819b-6cca-4680-a37d-ade6172d7778" containerName="controller-manager" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806292 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerName="route-controller-manager" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806312 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="4e650ae1-e2c6-4bd9-9399-2ef77e811f63" containerName="collect-profiles" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806322 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="8168819b-6cca-4680-a37d-ade6172d7778" containerName="controller-manager" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.806333 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.815859 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-555c55766b-jdqhk"] Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.816042 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.831218 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"] Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.838349 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"] Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.838703 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.845010 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8168819b-6cca-4680-a37d-ade6172d7778-tmp\") pod \"8168819b-6cca-4680-a37d-ade6172d7778\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.845232 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-config\") pod \"8168819b-6cca-4680-a37d-ade6172d7778\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.845387 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-config\") pod \"0c6013b9-649a-4f77-a54b-c272bbbdf392\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.845776 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c6013b9-649a-4f77-a54b-c272bbbdf392-tmp\") pod \"0c6013b9-649a-4f77-a54b-c272bbbdf392\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.846383 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lw6h\" (UniqueName: \"kubernetes.io/projected/0c6013b9-649a-4f77-a54b-c272bbbdf392-kube-api-access-6lw6h\") pod \"0c6013b9-649a-4f77-a54b-c272bbbdf392\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.846563 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dd77\" (UniqueName: \"kubernetes.io/projected/8168819b-6cca-4680-a37d-ade6172d7778-kube-api-access-6dd77\") pod \"8168819b-6cca-4680-a37d-ade6172d7778\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.846689 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-proxy-ca-bundles\") pod \"8168819b-6cca-4680-a37d-ade6172d7778\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.846761 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6013b9-649a-4f77-a54b-c272bbbdf392-serving-cert\") pod \"0c6013b9-649a-4f77-a54b-c272bbbdf392\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 
00:15:34.846863 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8168819b-6cca-4680-a37d-ade6172d7778-serving-cert\") pod \"8168819b-6cca-4680-a37d-ade6172d7778\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.846952 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-client-ca\") pod \"8168819b-6cca-4680-a37d-ade6172d7778\" (UID: \"8168819b-6cca-4680-a37d-ade6172d7778\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.847055 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-client-ca\") pod \"0c6013b9-649a-4f77-a54b-c272bbbdf392\" (UID: \"0c6013b9-649a-4f77-a54b-c272bbbdf392\") " Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.847332 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ggcp\" (UniqueName: \"kubernetes.io/projected/601c1b9a-5708-4039-99b1-1492e0352516-kube-api-access-5ggcp\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.848013 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-proxy-ca-bundles\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.848129 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c1b9a-5708-4039-99b1-1492e0352516-serving-cert\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.848238 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-config\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.848387 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/601c1b9a-5708-4039-99b1-1492e0352516-tmp\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.848484 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-client-ca\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " 
pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.847497 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8168819b-6cca-4680-a37d-ade6172d7778-tmp" (OuterVolumeSpecName: "tmp") pod "8168819b-6cca-4680-a37d-ade6172d7778" (UID: "8168819b-6cca-4680-a37d-ade6172d7778"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.849221 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c6013b9-649a-4f77-a54b-c272bbbdf392-tmp" (OuterVolumeSpecName: "tmp") pod "0c6013b9-649a-4f77-a54b-c272bbbdf392" (UID: "0c6013b9-649a-4f77-a54b-c272bbbdf392"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.849467 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-config" (OuterVolumeSpecName: "config") pod "0c6013b9-649a-4f77-a54b-c272bbbdf392" (UID: "0c6013b9-649a-4f77-a54b-c272bbbdf392"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.849511 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-client-ca" (OuterVolumeSpecName: "client-ca") pod "0c6013b9-649a-4f77-a54b-c272bbbdf392" (UID: "0c6013b9-649a-4f77-a54b-c272bbbdf392"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.850637 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-config" (OuterVolumeSpecName: "config") pod "8168819b-6cca-4680-a37d-ade6172d7778" (UID: "8168819b-6cca-4680-a37d-ade6172d7778"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.850639 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8168819b-6cca-4680-a37d-ade6172d7778" (UID: "8168819b-6cca-4680-a37d-ade6172d7778"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.852175 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.852225 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.852290 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8168819b-6cca-4680-a37d-ade6172d7778-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.852317 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.852334 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6013b9-649a-4f77-a54b-c272bbbdf392-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.852347 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c6013b9-649a-4f77-a54b-c272bbbdf392-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.853492 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-client-ca" (OuterVolumeSpecName: "client-ca") pod "8168819b-6cca-4680-a37d-ade6172d7778" (UID: "8168819b-6cca-4680-a37d-ade6172d7778"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.859464 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c6013b9-649a-4f77-a54b-c272bbbdf392-kube-api-access-6lw6h" (OuterVolumeSpecName: "kube-api-access-6lw6h") pod "0c6013b9-649a-4f77-a54b-c272bbbdf392" (UID: "0c6013b9-649a-4f77-a54b-c272bbbdf392"). InnerVolumeSpecName "kube-api-access-6lw6h". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.859758 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8168819b-6cca-4680-a37d-ade6172d7778-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8168819b-6cca-4680-a37d-ade6172d7778" (UID: "8168819b-6cca-4680-a37d-ade6172d7778"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.859776 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8168819b-6cca-4680-a37d-ade6172d7778-kube-api-access-6dd77" (OuterVolumeSpecName: "kube-api-access-6dd77") pod "8168819b-6cca-4680-a37d-ade6172d7778" (UID: "8168819b-6cca-4680-a37d-ade6172d7778"). InnerVolumeSpecName "kube-api-access-6dd77". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.860855 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6013b9-649a-4f77-a54b-c272bbbdf392-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0c6013b9-649a-4f77-a54b-c272bbbdf392" (UID: "0c6013b9-649a-4f77-a54b-c272bbbdf392"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956196 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79ss8\" (UniqueName: \"kubernetes.io/projected/63718f7a-b919-4efa-a20f-49b0a552c69e-kube-api-access-79ss8\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956278 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-client-ca\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956476 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-config\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956676 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/601c1b9a-5708-4039-99b1-1492e0352516-tmp\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956702 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-client-ca\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956864 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5ggcp\" (UniqueName: \"kubernetes.io/projected/601c1b9a-5708-4039-99b1-1492e0352516-kube-api-access-5ggcp\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.956987 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63718f7a-b919-4efa-a20f-49b0a552c69e-serving-cert\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 
00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.957070 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-proxy-ca-bundles\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.957099 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63718f7a-b919-4efa-a20f-49b0a552c69e-tmp\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.958328 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c1b9a-5708-4039-99b1-1492e0352516-serving-cert\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.958415 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-config\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.958640 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6lw6h\" (UniqueName: \"kubernetes.io/projected/0c6013b9-649a-4f77-a54b-c272bbbdf392-kube-api-access-6lw6h\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.959239 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6dd77\" (UniqueName: \"kubernetes.io/projected/8168819b-6cca-4680-a37d-ade6172d7778-kube-api-access-6dd77\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.959257 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6013b9-649a-4f77-a54b-c272bbbdf392-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.959286 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8168819b-6cca-4680-a37d-ade6172d7778-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.959321 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8168819b-6cca-4680-a37d-ade6172d7778-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.959472 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-client-ca\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.963891 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for 
volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/601c1b9a-5708-4039-99b1-1492e0352516-tmp\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.965202 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-proxy-ca-bundles\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.969577 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-config\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.978877 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c1b9a-5708-4039-99b1-1492e0352516-serving-cert\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:34 crc kubenswrapper[5119]: I0130 00:15:34.978915 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ggcp\" (UniqueName: \"kubernetes.io/projected/601c1b9a-5708-4039-99b1-1492e0352516-kube-api-access-5ggcp\") pod \"controller-manager-555c55766b-jdqhk\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.061142 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63718f7a-b919-4efa-a20f-49b0a552c69e-serving-cert\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.061676 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63718f7a-b919-4efa-a20f-49b0a552c69e-tmp\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.061861 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-79ss8\" (UniqueName: \"kubernetes.io/projected/63718f7a-b919-4efa-a20f-49b0a552c69e-kube-api-access-79ss8\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.062000 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-client-ca\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: 
\"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.062143 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-config\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.062380 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63718f7a-b919-4efa-a20f-49b0a552c69e-tmp\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.064466 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-config\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.065435 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-client-ca\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.066827 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63718f7a-b919-4efa-a20f-49b0a552c69e-serving-cert\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.078660 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-79ss8\" (UniqueName: \"kubernetes.io/projected/63718f7a-b919-4efa-a20f-49b0a552c69e-kube-api-access-79ss8\") pod \"route-controller-manager-d8cf998c4-mmvwd\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.135818 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.181335 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.247645 5119 generic.go:358] "Generic (PLEG): container finished" podID="0c6013b9-649a-4f77-a54b-c272bbbdf392" containerID="cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253" exitCode=0 Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.247769 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" event={"ID":"0c6013b9-649a-4f77-a54b-c272bbbdf392","Type":"ContainerDied","Data":"cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253"} Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.247801 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" event={"ID":"0c6013b9-649a-4f77-a54b-c272bbbdf392","Type":"ContainerDied","Data":"827e1a6a18163f6bdb6ebd244ba8446ccd09192417032b44fa5f239660fcbb52"} Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.247822 5119 scope.go:117] "RemoveContainer" containerID="cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.248009 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.267270 5119 generic.go:358] "Generic (PLEG): container finished" podID="8168819b-6cca-4680-a37d-ade6172d7778" containerID="ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505" exitCode=0 Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.267516 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" event={"ID":"8168819b-6cca-4680-a37d-ade6172d7778","Type":"ContainerDied","Data":"ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505"} Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.267550 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" event={"ID":"8168819b-6cca-4680-a37d-ade6172d7778","Type":"ContainerDied","Data":"499a856683a97ce279a85b1f1235dd9123c3cce8869f3cf44dc5df6029257e48"} Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.268734 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-6x6hj" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.284760 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"] Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.293149 5119 scope.go:117] "RemoveContainer" containerID="cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253" Jan 30 00:15:35 crc kubenswrapper[5119]: E0130 00:15:35.294308 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253\": container with ID starting with cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253 not found: ID does not exist" containerID="cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.294344 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253"} err="failed to get container status \"cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253\": rpc error: code = NotFound desc = could not find container \"cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253\": container with ID starting with cfed738b00e11fc97a84cf38fe02f9b74b91369a96b6ed0398e32c45510f6253 not found: ID does not exist" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.294366 5119 scope.go:117] "RemoveContainer" containerID="ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.295684 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-8sggv"] Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.307057 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"] Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.311537 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-6x6hj"] Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.317970 5119 scope.go:117] "RemoveContainer" containerID="ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505" Jan 30 00:15:35 crc kubenswrapper[5119]: E0130 00:15:35.319881 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505\": container with ID starting with ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505 not found: ID does not exist" containerID="ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505" Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.319935 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505"} err="failed to get container status \"ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505\": rpc error: code = NotFound desc = could not find container \"ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505\": container with ID starting with ad17c9c706a9d419e87439e3ae4a8d022622f09133e1bdbc937b2f326387e505 not found: ID does not exist" Jan 30 00:15:35 crc 
Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.382561 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-555c55766b-jdqhk"]
Jan 30 00:15:35 crc kubenswrapper[5119]: I0130 00:15:35.406293 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"]
Jan 30 00:15:35 crc kubenswrapper[5119]: W0130 00:15:35.409181 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63718f7a_b919_4efa_a20f_49b0a552c69e.slice/crio-77434388dba13d92cef4734b5d6decb62a0da13781024fa6628c56a62292b153 WatchSource:0}: Error finding container 77434388dba13d92cef4734b5d6decb62a0da13781024fa6628c56a62292b153: Status 404 returned error can't find the container with id 77434388dba13d92cef4734b5d6decb62a0da13781024fa6628c56a62292b153
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.273218 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" event={"ID":"601c1b9a-5708-4039-99b1-1492e0352516","Type":"ContainerStarted","Data":"9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09"}
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.273545 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk"
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.273556 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" event={"ID":"601c1b9a-5708-4039-99b1-1492e0352516","Type":"ContainerStarted","Data":"14ecd3613df264335d2f1d1b727e82b01624fe98c187a6765bdcb7ddd453fe10"}
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.277480 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" event={"ID":"63718f7a-b919-4efa-a20f-49b0a552c69e","Type":"ContainerStarted","Data":"4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355"}
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.277521 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" event={"ID":"63718f7a-b919-4efa-a20f-49b0a552c69e","Type":"ContainerStarted","Data":"77434388dba13d92cef4734b5d6decb62a0da13781024fa6628c56a62292b153"}
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.277678 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.300913 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" podStartSLOduration=2.300893905 podStartE2EDuration="2.300893905s" podCreationTimestamp="2026-01-30 00:15:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:36.292773647 +0000 UTC m=+340.306836116" watchObservedRunningTime="2026-01-30 00:15:36.300893905 +0000 UTC m=+340.314956384"
Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.311906 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" 
podStartSLOduration=2.311884734 podStartE2EDuration="2.311884734s" podCreationTimestamp="2026-01-30 00:15:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:36.311844083 +0000 UTC m=+340.325906562" watchObservedRunningTime="2026-01-30 00:15:36.311884734 +0000 UTC m=+340.325947193" Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.478956 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.755952 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c6013b9-649a-4f77-a54b-c272bbbdf392" path="/var/lib/kubelet/pods/0c6013b9-649a-4f77-a54b-c272bbbdf392/volumes" Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.756616 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8168819b-6cca-4680-a37d-ade6172d7778" path="/var/lib/kubelet/pods/8168819b-6cca-4680-a37d-ade6172d7778/volumes" Jan 30 00:15:36 crc kubenswrapper[5119]: I0130 00:15:36.782156 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:41 crc kubenswrapper[5119]: I0130 00:15:41.674476 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-555c55766b-jdqhk"] Jan 30 00:15:41 crc kubenswrapper[5119]: I0130 00:15:41.675045 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" podUID="601c1b9a-5708-4039-99b1-1492e0352516" containerName="controller-manager" containerID="cri-o://9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09" gracePeriod=30 Jan 30 00:15:41 crc kubenswrapper[5119]: I0130 00:15:41.703400 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"] Jan 30 00:15:41 crc kubenswrapper[5119]: I0130 00:15:41.703646 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" podUID="63718f7a-b919-4efa-a20f-49b0a552c69e" containerName="route-controller-manager" containerID="cri-o://4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355" gracePeriod=30 Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.060635 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.070048 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.094917 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.095568 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="63718f7a-b919-4efa-a20f-49b0a552c69e" containerName="route-controller-manager" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.095586 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="63718f7a-b919-4efa-a20f-49b0a552c69e" containerName="route-controller-manager" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.095594 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="601c1b9a-5708-4039-99b1-1492e0352516" containerName="controller-manager" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.095605 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="601c1b9a-5708-4039-99b1-1492e0352516" containerName="controller-manager" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.095716 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="601c1b9a-5708-4039-99b1-1492e0352516" containerName="controller-manager" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.095732 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="63718f7a-b919-4efa-a20f-49b0a552c69e" containerName="route-controller-manager" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.099134 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.124282 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.129085 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-86f879749f-6m6gc"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.136145 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86f879749f-6m6gc"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.136307 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151196 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c1b9a-5708-4039-99b1-1492e0352516-serving-cert\") pod \"601c1b9a-5708-4039-99b1-1492e0352516\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151246 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-client-ca\") pod \"63718f7a-b919-4efa-a20f-49b0a552c69e\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151299 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63718f7a-b919-4efa-a20f-49b0a552c69e-serving-cert\") pod \"63718f7a-b919-4efa-a20f-49b0a552c69e\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151334 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79ss8\" (UniqueName: \"kubernetes.io/projected/63718f7a-b919-4efa-a20f-49b0a552c69e-kube-api-access-79ss8\") pod \"63718f7a-b919-4efa-a20f-49b0a552c69e\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151382 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63718f7a-b919-4efa-a20f-49b0a552c69e-tmp\") pod \"63718f7a-b919-4efa-a20f-49b0a552c69e\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151424 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-client-ca\") pod \"601c1b9a-5708-4039-99b1-1492e0352516\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151468 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-proxy-ca-bundles\") pod \"601c1b9a-5708-4039-99b1-1492e0352516\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151495 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-config\") pod \"63718f7a-b919-4efa-a20f-49b0a552c69e\" (UID: \"63718f7a-b919-4efa-a20f-49b0a552c69e\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151521 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ggcp\" (UniqueName: \"kubernetes.io/projected/601c1b9a-5708-4039-99b1-1492e0352516-kube-api-access-5ggcp\") pod \"601c1b9a-5708-4039-99b1-1492e0352516\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151573 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-config\") pod \"601c1b9a-5708-4039-99b1-1492e0352516\" (UID: 
\"601c1b9a-5708-4039-99b1-1492e0352516\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151611 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/601c1b9a-5708-4039-99b1-1492e0352516-tmp\") pod \"601c1b9a-5708-4039-99b1-1492e0352516\" (UID: \"601c1b9a-5708-4039-99b1-1492e0352516\") " Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151823 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-config\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151859 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/420d6f21-0f07-4b0f-9b9c-129c92015b7f-serving-cert\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.151909 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/420d6f21-0f07-4b0f-9b9c-129c92015b7f-tmp\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.152007 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-client-ca\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.152036 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z98mz\" (UniqueName: \"kubernetes.io/projected/420d6f21-0f07-4b0f-9b9c-129c92015b7f-kube-api-access-z98mz\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.154516 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-client-ca" (OuterVolumeSpecName: "client-ca") pod "63718f7a-b919-4efa-a20f-49b0a552c69e" (UID: "63718f7a-b919-4efa-a20f-49b0a552c69e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.154816 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/601c1b9a-5708-4039-99b1-1492e0352516-tmp" (OuterVolumeSpecName: "tmp") pod "601c1b9a-5708-4039-99b1-1492e0352516" (UID: "601c1b9a-5708-4039-99b1-1492e0352516"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.154847 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-client-ca" (OuterVolumeSpecName: "client-ca") pod "601c1b9a-5708-4039-99b1-1492e0352516" (UID: "601c1b9a-5708-4039-99b1-1492e0352516"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.155096 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63718f7a-b919-4efa-a20f-49b0a552c69e-tmp" (OuterVolumeSpecName: "tmp") pod "63718f7a-b919-4efa-a20f-49b0a552c69e" (UID: "63718f7a-b919-4efa-a20f-49b0a552c69e"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.155305 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "601c1b9a-5708-4039-99b1-1492e0352516" (UID: "601c1b9a-5708-4039-99b1-1492e0352516"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.155324 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-config" (OuterVolumeSpecName: "config") pod "601c1b9a-5708-4039-99b1-1492e0352516" (UID: "601c1b9a-5708-4039-99b1-1492e0352516"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.155530 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-config" (OuterVolumeSpecName: "config") pod "63718f7a-b919-4efa-a20f-49b0a552c69e" (UID: "63718f7a-b919-4efa-a20f-49b0a552c69e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.160459 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/601c1b9a-5708-4039-99b1-1492e0352516-kube-api-access-5ggcp" (OuterVolumeSpecName: "kube-api-access-5ggcp") pod "601c1b9a-5708-4039-99b1-1492e0352516" (UID: "601c1b9a-5708-4039-99b1-1492e0352516"). InnerVolumeSpecName "kube-api-access-5ggcp". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.160549 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63718f7a-b919-4efa-a20f-49b0a552c69e-kube-api-access-79ss8" (OuterVolumeSpecName: "kube-api-access-79ss8") pod "63718f7a-b919-4efa-a20f-49b0a552c69e" (UID: "63718f7a-b919-4efa-a20f-49b0a552c69e"). InnerVolumeSpecName "kube-api-access-79ss8". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.160675 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/601c1b9a-5708-4039-99b1-1492e0352516-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "601c1b9a-5708-4039-99b1-1492e0352516" (UID: "601c1b9a-5708-4039-99b1-1492e0352516"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.163465 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63718f7a-b919-4efa-a20f-49b0a552c69e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "63718f7a-b919-4efa-a20f-49b0a552c69e" (UID: "63718f7a-b919-4efa-a20f-49b0a552c69e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252712 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/420d6f21-0f07-4b0f-9b9c-129c92015b7f-tmp\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252775 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-proxy-ca-bundles\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252838 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2df03843-0d55-4a23-95ed-389f35db9a4e-serving-cert\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252866 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-client-ca\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252882 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-z98mz\" (UniqueName: \"kubernetes.io/projected/420d6f21-0f07-4b0f-9b9c-129c92015b7f-kube-api-access-z98mz\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252900 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2df03843-0d55-4a23-95ed-389f35db9a4e-tmp\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.252987 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h7n4\" (UniqueName: \"kubernetes.io/projected/2df03843-0d55-4a23-95ed-389f35db9a4e-kube-api-access-7h7n4\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 
crc kubenswrapper[5119]: I0130 00:15:42.253050 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-config\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253068 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/420d6f21-0f07-4b0f-9b9c-129c92015b7f-serving-cert\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253088 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-client-ca\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253105 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-config\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253151 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c1b9a-5708-4039-99b1-1492e0352516-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253162 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253170 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63718f7a-b919-4efa-a20f-49b0a552c69e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253178 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-79ss8\" (UniqueName: \"kubernetes.io/projected/63718f7a-b919-4efa-a20f-49b0a552c69e-kube-api-access-79ss8\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253186 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63718f7a-b919-4efa-a20f-49b0a552c69e-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253194 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253202 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc 
kubenswrapper[5119]: I0130 00:15:42.253211 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63718f7a-b919-4efa-a20f-49b0a552c69e-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253218 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-5ggcp\" (UniqueName: \"kubernetes.io/projected/601c1b9a-5708-4039-99b1-1492e0352516-kube-api-access-5ggcp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253226 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c1b9a-5708-4039-99b1-1492e0352516-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253234 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/601c1b9a-5708-4039-99b1-1492e0352516-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.253879 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-client-ca\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.254114 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-config\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.254601 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/420d6f21-0f07-4b0f-9b9c-129c92015b7f-tmp\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.258455 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/420d6f21-0f07-4b0f-9b9c-129c92015b7f-serving-cert\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.280722 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-z98mz\" (UniqueName: \"kubernetes.io/projected/420d6f21-0f07-4b0f-9b9c-129c92015b7f-kube-api-access-z98mz\") pod \"route-controller-manager-6778fffc64-gm5mt\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.317001 5119 generic.go:358] "Generic (PLEG): container finished" podID="63718f7a-b919-4efa-a20f-49b0a552c69e" containerID="4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355" exitCode=0 Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.317086 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.317093 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" event={"ID":"63718f7a-b919-4efa-a20f-49b0a552c69e","Type":"ContainerDied","Data":"4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355"} Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.317130 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd" event={"ID":"63718f7a-b919-4efa-a20f-49b0a552c69e","Type":"ContainerDied","Data":"77434388dba13d92cef4734b5d6decb62a0da13781024fa6628c56a62292b153"} Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.317146 5119 scope.go:117] "RemoveContainer" containerID="4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.321571 5119 generic.go:358] "Generic (PLEG): container finished" podID="601c1b9a-5708-4039-99b1-1492e0352516" containerID="9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09" exitCode=0 Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.321640 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.321670 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" event={"ID":"601c1b9a-5708-4039-99b1-1492e0352516","Type":"ContainerDied","Data":"9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09"} Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.321706 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555c55766b-jdqhk" event={"ID":"601c1b9a-5708-4039-99b1-1492e0352516","Type":"ContainerDied","Data":"14ecd3613df264335d2f1d1b727e82b01624fe98c187a6765bdcb7ddd453fe10"} Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.378742 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2df03843-0d55-4a23-95ed-389f35db9a4e-serving-cert\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.378817 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2df03843-0d55-4a23-95ed-389f35db9a4e-tmp\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.378899 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7h7n4\" (UniqueName: \"kubernetes.io/projected/2df03843-0d55-4a23-95ed-389f35db9a4e-kube-api-access-7h7n4\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.378960 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-client-ca\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.378987 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-config\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.379251 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-proxy-ca-bundles\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.379799 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2df03843-0d55-4a23-95ed-389f35db9a4e-tmp\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.380203 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-client-ca\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.380968 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-config\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.382811 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-proxy-ca-bundles\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.383672 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2df03843-0d55-4a23-95ed-389f35db9a4e-serving-cert\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.395306 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h7n4\" (UniqueName: \"kubernetes.io/projected/2df03843-0d55-4a23-95ed-389f35db9a4e-kube-api-access-7h7n4\") pod \"controller-manager-86f879749f-6m6gc\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc 
kubenswrapper[5119]: I0130 00:15:42.395442 5119 scope.go:117] "RemoveContainer" containerID="4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355" Jan 30 00:15:42 crc kubenswrapper[5119]: E0130 00:15:42.396142 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355\": container with ID starting with 4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355 not found: ID does not exist" containerID="4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.396226 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355"} err="failed to get container status \"4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355\": rpc error: code = NotFound desc = could not find container \"4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355\": container with ID starting with 4d33e3e78c33f48bf13ca9098dc021531df62d032501ba2600d9116401845355 not found: ID does not exist" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.396317 5119 scope.go:117] "RemoveContainer" containerID="9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.399046 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-555c55766b-jdqhk"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.405768 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-555c55766b-jdqhk"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.410259 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.415705 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d8cf998c4-mmvwd"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.419662 5119 scope.go:117] "RemoveContainer" containerID="9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09" Jan 30 00:15:42 crc kubenswrapper[5119]: E0130 00:15:42.423925 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09\": container with ID starting with 9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09 not found: ID does not exist" containerID="9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.423988 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09"} err="failed to get container status \"9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09\": rpc error: code = NotFound desc = could not find container \"9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09\": container with ID starting with 9fc928af96f6ca09eba6e450ecf70ae4333cab66c4a88d8d9f8c885c3d3b1a09 not found: ID does not exist" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.425918 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.454021 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.657092 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86f879749f-6m6gc"] Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.755004 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="601c1b9a-5708-4039-99b1-1492e0352516" path="/var/lib/kubelet/pods/601c1b9a-5708-4039-99b1-1492e0352516/volumes" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.755579 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63718f7a-b919-4efa-a20f-49b0a552c69e" path="/var/lib/kubelet/pods/63718f7a-b919-4efa-a20f-49b0a552c69e/volumes" Jan 30 00:15:42 crc kubenswrapper[5119]: I0130 00:15:42.827177 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"] Jan 30 00:15:42 crc kubenswrapper[5119]: W0130 00:15:42.841721 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod420d6f21_0f07_4b0f_9b9c_129c92015b7f.slice/crio-50b31be490a8f0ee07f1176242cabe9d3eaac32f63e2579685da558b2966c8d7 WatchSource:0}: Error finding container 50b31be490a8f0ee07f1176242cabe9d3eaac32f63e2579685da558b2966c8d7: Status 404 returned error can't find the container with id 50b31be490a8f0ee07f1176242cabe9d3eaac32f63e2579685da558b2966c8d7 Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.329069 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" event={"ID":"2df03843-0d55-4a23-95ed-389f35db9a4e","Type":"ContainerStarted","Data":"98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f"} Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.329116 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" event={"ID":"2df03843-0d55-4a23-95ed-389f35db9a4e","Type":"ContainerStarted","Data":"f6dcb0a04aa14ef872133f22e6330db8d73fbc642d25f85278f1cb6168ec05cb"} Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.329136 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.330326 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" event={"ID":"420d6f21-0f07-4b0f-9b9c-129c92015b7f","Type":"ContainerStarted","Data":"de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f"} Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.330364 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" event={"ID":"420d6f21-0f07-4b0f-9b9c-129c92015b7f","Type":"ContainerStarted","Data":"50b31be490a8f0ee07f1176242cabe9d3eaac32f63e2579685da558b2966c8d7"} Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.330568 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" 
pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.347908 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" podStartSLOduration=2.34788942 podStartE2EDuration="2.34788942s" podCreationTimestamp="2026-01-30 00:15:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:43.347212824 +0000 UTC m=+347.361275273" watchObservedRunningTime="2026-01-30 00:15:43.34788942 +0000 UTC m=+347.361951879" Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.364650 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" podStartSLOduration=2.364630879 podStartE2EDuration="2.364630879s" podCreationTimestamp="2026-01-30 00:15:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:43.359473903 +0000 UTC m=+347.373536372" watchObservedRunningTime="2026-01-30 00:15:43.364630879 +0000 UTC m=+347.378693338" Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.552318 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:43 crc kubenswrapper[5119]: I0130 00:15:43.913518 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:45 crc kubenswrapper[5119]: I0130 00:15:45.687066 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-86f879749f-6m6gc"] Jan 30 00:15:45 crc kubenswrapper[5119]: I0130 00:15:45.700340 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"] Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.347364 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" podUID="420d6f21-0f07-4b0f-9b9c-129c92015b7f" containerName="route-controller-manager" containerID="cri-o://de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f" gracePeriod=30 Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.348322 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" podUID="2df03843-0d55-4a23-95ed-389f35db9a4e" containerName="controller-manager" containerID="cri-o://98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f" gracePeriod=30 Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.744938 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.785713 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"] Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.786754 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="420d6f21-0f07-4b0f-9b9c-129c92015b7f" containerName="route-controller-manager" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.786781 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="420d6f21-0f07-4b0f-9b9c-129c92015b7f" containerName="route-controller-manager" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.786921 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="420d6f21-0f07-4b0f-9b9c-129c92015b7f" containerName="route-controller-manager" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.791738 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.794352 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"] Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.794472 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.818626 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7765df77bd-6k425"] Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.819339 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="2df03843-0d55-4a23-95ed-389f35db9a4e" containerName="controller-manager" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.819435 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df03843-0d55-4a23-95ed-389f35db9a4e" containerName="controller-manager" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.819598 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="2df03843-0d55-4a23-95ed-389f35db9a4e" containerName="controller-manager" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.823098 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.845784 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19fdb149-a6fa-4242-bfa0-2e0959c0446f-serving-cert\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.846020 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-client-ca\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.846156 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-config\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.846245 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/19fdb149-a6fa-4242-bfa0-2e0959c0446f-tmp\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.846383 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntm42\" (UniqueName: \"kubernetes.io/projected/19fdb149-a6fa-4242-bfa0-2e0959c0446f-kube-api-access-ntm42\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.851756 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7765df77bd-6k425"] Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.946958 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z98mz\" (UniqueName: \"kubernetes.io/projected/420d6f21-0f07-4b0f-9b9c-129c92015b7f-kube-api-access-z98mz\") pod \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.947511 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-proxy-ca-bundles\") pod \"2df03843-0d55-4a23-95ed-389f35db9a4e\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.947652 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/420d6f21-0f07-4b0f-9b9c-129c92015b7f-serving-cert\") pod 
\"420d6f21-0f07-4b0f-9b9c-129c92015b7f\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.947776 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h7n4\" (UniqueName: \"kubernetes.io/projected/2df03843-0d55-4a23-95ed-389f35db9a4e-kube-api-access-7h7n4\") pod \"2df03843-0d55-4a23-95ed-389f35db9a4e\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.947915 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/420d6f21-0f07-4b0f-9b9c-129c92015b7f-tmp\") pod \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948033 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-client-ca\") pod \"2df03843-0d55-4a23-95ed-389f35db9a4e\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948143 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-config\") pod \"2df03843-0d55-4a23-95ed-389f35db9a4e\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948828 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-client-ca\") pod \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948097 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/420d6f21-0f07-4b0f-9b9c-129c92015b7f-tmp" (OuterVolumeSpecName: "tmp") pod "420d6f21-0f07-4b0f-9b9c-129c92015b7f" (UID: "420d6f21-0f07-4b0f-9b9c-129c92015b7f"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948333 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "2df03843-0d55-4a23-95ed-389f35db9a4e" (UID: "2df03843-0d55-4a23-95ed-389f35db9a4e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948447 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-client-ca" (OuterVolumeSpecName: "client-ca") pod "2df03843-0d55-4a23-95ed-389f35db9a4e" (UID: "2df03843-0d55-4a23-95ed-389f35db9a4e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.948756 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-config" (OuterVolumeSpecName: "config") pod "2df03843-0d55-4a23-95ed-389f35db9a4e" (UID: "2df03843-0d55-4a23-95ed-389f35db9a4e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.949460 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-client-ca" (OuterVolumeSpecName: "client-ca") pod "420d6f21-0f07-4b0f-9b9c-129c92015b7f" (UID: "420d6f21-0f07-4b0f-9b9c-129c92015b7f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.949840 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2df03843-0d55-4a23-95ed-389f35db9a4e-tmp" (OuterVolumeSpecName: "tmp") pod "2df03843-0d55-4a23-95ed-389f35db9a4e" (UID: "2df03843-0d55-4a23-95ed-389f35db9a4e"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.949548 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2df03843-0d55-4a23-95ed-389f35db9a4e-tmp\") pod \"2df03843-0d55-4a23-95ed-389f35db9a4e\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.951168 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2df03843-0d55-4a23-95ed-389f35db9a4e-serving-cert\") pod \"2df03843-0d55-4a23-95ed-389f35db9a4e\" (UID: \"2df03843-0d55-4a23-95ed-389f35db9a4e\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.951631 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-config\") pod \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\" (UID: \"420d6f21-0f07-4b0f-9b9c-129c92015b7f\") " Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.951932 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/19fdb149-a6fa-4242-bfa0-2e0959c0446f-tmp\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952078 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f10193a8-86e9-44ff-8668-552d039f4117-tmp\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952174 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxtsw\" (UniqueName: \"kubernetes.io/projected/f10193a8-86e9-44ff-8668-552d039f4117-kube-api-access-zxtsw\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952285 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/19fdb149-a6fa-4242-bfa0-2e0959c0446f-tmp\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " 
pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952106 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-config" (OuterVolumeSpecName: "config") pod "420d6f21-0f07-4b0f-9b9c-129c92015b7f" (UID: "420d6f21-0f07-4b0f-9b9c-129c92015b7f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952425 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-ntm42\" (UniqueName: \"kubernetes.io/projected/19fdb149-a6fa-4242-bfa0-2e0959c0446f-kube-api-access-ntm42\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952537 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-client-ca\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952667 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-proxy-ca-bundles\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.952822 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f10193a8-86e9-44ff-8668-552d039f4117-serving-cert\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953023 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19fdb149-a6fa-4242-bfa0-2e0959c0446f-serving-cert\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953161 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-client-ca\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953312 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-config\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" 
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953454 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-config\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953608 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/420d6f21-0f07-4b0f-9b9c-129c92015b7f-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953740 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-client-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953849 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953961 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-client-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954077 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2df03843-0d55-4a23-95ed-389f35db9a4e-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954172 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420d6f21-0f07-4b0f-9b9c-129c92015b7f-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954266 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2df03843-0d55-4a23-95ed-389f35db9a4e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954183 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-client-ca\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.953582 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2df03843-0d55-4a23-95ed-389f35db9a4e-kube-api-access-7h7n4" (OuterVolumeSpecName: "kube-api-access-7h7n4") pod "2df03843-0d55-4a23-95ed-389f35db9a4e" (UID: "2df03843-0d55-4a23-95ed-389f35db9a4e"). InnerVolumeSpecName "kube-api-access-7h7n4". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954089 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/420d6f21-0f07-4b0f-9b9c-129c92015b7f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "420d6f21-0f07-4b0f-9b9c-129c92015b7f" (UID: "420d6f21-0f07-4b0f-9b9c-129c92015b7f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954687 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-config\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.954817 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/420d6f21-0f07-4b0f-9b9c-129c92015b7f-kube-api-access-z98mz" (OuterVolumeSpecName: "kube-api-access-z98mz") pod "420d6f21-0f07-4b0f-9b9c-129c92015b7f" (UID: "420d6f21-0f07-4b0f-9b9c-129c92015b7f"). InnerVolumeSpecName "kube-api-access-z98mz". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.956626 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df03843-0d55-4a23-95ed-389f35db9a4e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2df03843-0d55-4a23-95ed-389f35db9a4e" (UID: "2df03843-0d55-4a23-95ed-389f35db9a4e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.958333 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19fdb149-a6fa-4242-bfa0-2e0959c0446f-serving-cert\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:46 crc kubenswrapper[5119]: I0130 00:15:46.969576 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntm42\" (UniqueName: \"kubernetes.io/projected/19fdb149-a6fa-4242-bfa0-2e0959c0446f-kube-api-access-ntm42\") pod \"route-controller-manager-6778fffc64-x2fr6\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") " pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055202 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-config\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055278 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f10193a8-86e9-44ff-8668-552d039f4117-tmp\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055302 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zxtsw\" (UniqueName: \"kubernetes.io/projected/f10193a8-86e9-44ff-8668-552d039f4117-kube-api-access-zxtsw\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055356 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-client-ca\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055454 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-proxy-ca-bundles\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055479 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f10193a8-86e9-44ff-8668-552d039f4117-serving-cert\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055539 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-z98mz\" (UniqueName: \"kubernetes.io/projected/420d6f21-0f07-4b0f-9b9c-129c92015b7f-kube-api-access-z98mz\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055552 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/420d6f21-0f07-4b0f-9b9c-129c92015b7f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055565 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7h7n4\" (UniqueName: \"kubernetes.io/projected/2df03843-0d55-4a23-95ed-389f35db9a4e-kube-api-access-7h7n4\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.055578 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2df03843-0d55-4a23-95ed-389f35db9a4e-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.056416 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-client-ca\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.056494 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-proxy-ca-bundles\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.056512 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-config\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.056911 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f10193a8-86e9-44ff-8668-552d039f4117-tmp\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.064061 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f10193a8-86e9-44ff-8668-552d039f4117-serving-cert\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.077724 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxtsw\" (UniqueName: \"kubernetes.io/projected/f10193a8-86e9-44ff-8668-552d039f4117-kube-api-access-zxtsw\") pod \"controller-manager-7765df77bd-6k425\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") " pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.110575 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.149364 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.336367 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7765df77bd-6k425"]
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.353224 5119 generic.go:358] "Generic (PLEG): container finished" podID="2df03843-0d55-4a23-95ed-389f35db9a4e" containerID="98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f" exitCode=0
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.353310 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" event={"ID":"2df03843-0d55-4a23-95ed-389f35db9a4e","Type":"ContainerDied","Data":"98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f"}
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.353336 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc" event={"ID":"2df03843-0d55-4a23-95ed-389f35db9a4e","Type":"ContainerDied","Data":"f6dcb0a04aa14ef872133f22e6330db8d73fbc642d25f85278f1cb6168ec05cb"}
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.353354 5119 scope.go:117] "RemoveContainer" containerID="98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.353509 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86f879749f-6m6gc"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.357418 5119 generic.go:358] "Generic (PLEG): container finished" podID="420d6f21-0f07-4b0f-9b9c-129c92015b7f" containerID="de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f" exitCode=0
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.357560 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" event={"ID":"420d6f21-0f07-4b0f-9b9c-129c92015b7f","Type":"ContainerDied","Data":"de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f"}
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.357590 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt" event={"ID":"420d6f21-0f07-4b0f-9b9c-129c92015b7f","Type":"ContainerDied","Data":"50b31be490a8f0ee07f1176242cabe9d3eaac32f63e2579685da558b2966c8d7"}
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.357681 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.359506 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" event={"ID":"f10193a8-86e9-44ff-8668-552d039f4117","Type":"ContainerStarted","Data":"75aa5b45ef0e39120046c1f2f70f3eb06f02890d9209c940b436102d1ce2f474"}
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.373584 5119 scope.go:117] "RemoveContainer" containerID="98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f"
Jan 30 00:15:47 crc kubenswrapper[5119]: E0130 00:15:47.374340 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f\": container with ID starting with 98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f not found: ID does not exist" containerID="98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.374410 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f"} err="failed to get container status \"98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f\": rpc error: code = NotFound desc = could not find container \"98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f\": container with ID starting with 98546775449ab5dd768f7caf52a2f0324714366fe862ffd8228e795a49558e5f not found: ID does not exist"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.374446 5119 scope.go:117] "RemoveContainer" containerID="de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.378751 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"]
Jan 30 00:15:47 crc kubenswrapper[5119]: W0130 00:15:47.387793 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19fdb149_a6fa_4242_bfa0_2e0959c0446f.slice/crio-0ecb616f67b36bc79754d72fbf7ce31c8ac2195ca6fa0cd9353d9637d792aa7d WatchSource:0}: Error finding container 0ecb616f67b36bc79754d72fbf7ce31c8ac2195ca6fa0cd9353d9637d792aa7d: Status 404 returned error can't find the container with id 0ecb616f67b36bc79754d72fbf7ce31c8ac2195ca6fa0cd9353d9637d792aa7d
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.392580 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-86f879749f-6m6gc"]
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.397949 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-86f879749f-6m6gc"]
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.401261 5119 scope.go:117] "RemoveContainer" containerID="de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f"
Jan 30 00:15:47 crc kubenswrapper[5119]: E0130 00:15:47.401622 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f\": container with ID starting with de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f not found: ID does not exist" containerID="de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.401662 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f"} err="failed to get container status \"de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f\": rpc error: code = NotFound desc = could not find container \"de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f\": container with ID starting with de2d43edd5107fdbb40db73f14b96d37fed5281bb2865d09b65a51fbc7c2e00f not found: ID does not exist"
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.402094 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"]
Jan 30 00:15:47 crc kubenswrapper[5119]: I0130 00:15:47.407927 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-gm5mt"]
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.368173 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" event={"ID":"19fdb149-a6fa-4242-bfa0-2e0959c0446f","Type":"ContainerStarted","Data":"df02b104dd4edb6fe502c013762fa21192a8ea514294791146b2eaf11eb41158"}
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.368569 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.368581 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" event={"ID":"19fdb149-a6fa-4242-bfa0-2e0959c0446f","Type":"ContainerStarted","Data":"0ecb616f67b36bc79754d72fbf7ce31c8ac2195ca6fa0cd9353d9637d792aa7d"}
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.371540 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" event={"ID":"f10193a8-86e9-44ff-8668-552d039f4117","Type":"ContainerStarted","Data":"6d066f02525a98e2bbf70a492f8d73366e372072bd67b0bdfb7cc43b2e2788db"}
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.371839 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.385044 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" podStartSLOduration=2.385025835 podStartE2EDuration="2.385025835s" podCreationTimestamp="2026-01-30 00:15:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:48.382661457 +0000 UTC m=+352.396723916" watchObservedRunningTime="2026-01-30 00:15:48.385025835 +0000 UTC m=+352.399088294"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.398839 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" podStartSLOduration=3.398820899 podStartE2EDuration="3.398820899s" podCreationTimestamp="2026-01-30 00:15:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:48.396433021 +0000 UTC m=+352.410495510" watchObservedRunningTime="2026-01-30 00:15:48.398820899 +0000 UTC m=+352.412883358"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.446765 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.471541 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.754863 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2df03843-0d55-4a23-95ed-389f35db9a4e" path="/var/lib/kubelet/pods/2df03843-0d55-4a23-95ed-389f35db9a4e/volumes"
Jan 30 00:15:48 crc kubenswrapper[5119]: I0130 00:15:48.755380 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="420d6f21-0f07-4b0f-9b9c-129c92015b7f" path="/var/lib/kubelet/pods/420d6f21-0f07-4b0f-9b9c-129c92015b7f/volumes"
Jan 30 00:16:06 crc kubenswrapper[5119]: I0130 00:16:06.936296 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-w67fs"]
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.351036 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"]
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.351820 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" podUID="19fdb149-a6fa-4242-bfa0-2e0959c0446f" containerName="route-controller-manager" containerID="cri-o://df02b104dd4edb6fe502c013762fa21192a8ea514294791146b2eaf11eb41158" gracePeriod=30
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.367943 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7765df77bd-6k425"]
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.368381 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" podUID="f10193a8-86e9-44ff-8668-552d039f4117" containerName="controller-manager" containerID="cri-o://6d066f02525a98e2bbf70a492f8d73366e372072bd67b0bdfb7cc43b2e2788db" gracePeriod=30
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.548655 5119 generic.go:358] "Generic (PLEG): container finished" podID="19fdb149-a6fa-4242-bfa0-2e0959c0446f" containerID="df02b104dd4edb6fe502c013762fa21192a8ea514294791146b2eaf11eb41158" exitCode=0
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.549157 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" event={"ID":"19fdb149-a6fa-4242-bfa0-2e0959c0446f","Type":"ContainerDied","Data":"df02b104dd4edb6fe502c013762fa21192a8ea514294791146b2eaf11eb41158"}
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.550367 5119 generic.go:358] "Generic (PLEG): container finished" podID="f10193a8-86e9-44ff-8668-552d039f4117" containerID="6d066f02525a98e2bbf70a492f8d73366e372072bd67b0bdfb7cc43b2e2788db" exitCode=0
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.550425 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" event={"ID":"f10193a8-86e9-44ff-8668-552d039f4117","Type":"ContainerDied","Data":"6d066f02525a98e2bbf70a492f8d73366e372072bd67b0bdfb7cc43b2e2788db"}
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.835785 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.858749 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"]
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.859380 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="19fdb149-a6fa-4242-bfa0-2e0959c0446f" containerName="route-controller-manager"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.859426 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="19fdb149-a6fa-4242-bfa0-2e0959c0446f" containerName="route-controller-manager"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.859523 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="19fdb149-a6fa-4242-bfa0-2e0959c0446f" containerName="route-controller-manager"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.862987 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.869328 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"]
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.985821 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntm42\" (UniqueName: \"kubernetes.io/projected/19fdb149-a6fa-4242-bfa0-2e0959c0446f-kube-api-access-ntm42\") pod \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") "
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.985969 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-config\") pod \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") "
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986018 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/19fdb149-a6fa-4242-bfa0-2e0959c0446f-tmp\") pod \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") "
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986065 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19fdb149-a6fa-4242-bfa0-2e0959c0446f-serving-cert\") pod \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") "
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986201 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-client-ca\") pod \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\" (UID: \"19fdb149-a6fa-4242-bfa0-2e0959c0446f\") "
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986358 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-serving-cert\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986419 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19fdb149-a6fa-4242-bfa0-2e0959c0446f-tmp" (OuterVolumeSpecName: "tmp") pod "19fdb149-a6fa-4242-bfa0-2e0959c0446f" (UID: "19fdb149-a6fa-4242-bfa0-2e0959c0446f"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986477 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-client-ca\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986754 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-config\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986791 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-client-ca" (OuterVolumeSpecName: "client-ca") pod "19fdb149-a6fa-4242-bfa0-2e0959c0446f" (UID: "19fdb149-a6fa-4242-bfa0-2e0959c0446f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986954 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-config" (OuterVolumeSpecName: "config") pod "19fdb149-a6fa-4242-bfa0-2e0959c0446f" (UID: "19fdb149-a6fa-4242-bfa0-2e0959c0446f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.986958 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dm8f\" (UniqueName: \"kubernetes.io/projected/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-kube-api-access-2dm8f\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.987057 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-tmp\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.987279 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-client-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.987300 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19fdb149-a6fa-4242-bfa0-2e0959c0446f-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.987309 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/19fdb149-a6fa-4242-bfa0-2e0959c0446f-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.991827 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19fdb149-a6fa-4242-bfa0-2e0959c0446f-kube-api-access-ntm42" (OuterVolumeSpecName: "kube-api-access-ntm42") pod "19fdb149-a6fa-4242-bfa0-2e0959c0446f" (UID: "19fdb149-a6fa-4242-bfa0-2e0959c0446f"). InnerVolumeSpecName "kube-api-access-ntm42". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:16:14 crc kubenswrapper[5119]: I0130 00:16:14.995522 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fdb149-a6fa-4242-bfa0-2e0959c0446f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "19fdb149-a6fa-4242-bfa0-2e0959c0446f" (UID: "19fdb149-a6fa-4242-bfa0-2e0959c0446f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088123 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2dm8f\" (UniqueName: \"kubernetes.io/projected/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-kube-api-access-2dm8f\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088545 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-tmp\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088579 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-serving-cert\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088618 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-client-ca\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088686 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-config\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088731 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ntm42\" (UniqueName: \"kubernetes.io/projected/19fdb149-a6fa-4242-bfa0-2e0959c0446f-kube-api-access-ntm42\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.088747 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19fdb149-a6fa-4242-bfa0-2e0959c0446f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.089084 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-tmp\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.089861 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-config\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.089895 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-client-ca\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.092937 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-serving-cert\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.097208 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.109257 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dm8f\" (UniqueName: \"kubernetes.io/projected/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-kube-api-access-2dm8f\") pod \"route-controller-manager-744948db7b-v2lzb\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.120459 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-77f59b7476-p79kb"]
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.121042 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f10193a8-86e9-44ff-8668-552d039f4117" containerName="controller-manager"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.121241 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10193a8-86e9-44ff-8668-552d039f4117" containerName="controller-manager"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.121344 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="f10193a8-86e9-44ff-8668-552d039f4117" containerName="controller-manager"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.130505 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77f59b7476-p79kb"]
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.131139 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.181130 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.190906 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxtsw\" (UniqueName: \"kubernetes.io/projected/f10193a8-86e9-44ff-8668-552d039f4117-kube-api-access-zxtsw\") pod \"f10193a8-86e9-44ff-8668-552d039f4117\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") "
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.190972 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-config\") pod \"f10193a8-86e9-44ff-8668-552d039f4117\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") "
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.191097 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-proxy-ca-bundles\") pod \"f10193a8-86e9-44ff-8668-552d039f4117\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") "
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.191148 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f10193a8-86e9-44ff-8668-552d039f4117-serving-cert\") pod \"f10193a8-86e9-44ff-8668-552d039f4117\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") "
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.191208 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f10193a8-86e9-44ff-8668-552d039f4117-tmp\") pod \"f10193a8-86e9-44ff-8668-552d039f4117\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") "
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.191228 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-client-ca\") pod \"f10193a8-86e9-44ff-8668-552d039f4117\" (UID: \"f10193a8-86e9-44ff-8668-552d039f4117\") "
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.191668 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f10193a8-86e9-44ff-8668-552d039f4117-tmp" (OuterVolumeSpecName: "tmp") pod "f10193a8-86e9-44ff-8668-552d039f4117" (UID: "f10193a8-86e9-44ff-8668-552d039f4117"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.192054 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-client-ca" (OuterVolumeSpecName: "client-ca") pod "f10193a8-86e9-44ff-8668-552d039f4117" (UID: "f10193a8-86e9-44ff-8668-552d039f4117"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.192067 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f10193a8-86e9-44ff-8668-552d039f4117" (UID: "f10193a8-86e9-44ff-8668-552d039f4117"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.192185 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-config" (OuterVolumeSpecName: "config") pod "f10193a8-86e9-44ff-8668-552d039f4117" (UID: "f10193a8-86e9-44ff-8668-552d039f4117"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.194881 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f10193a8-86e9-44ff-8668-552d039f4117-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f10193a8-86e9-44ff-8668-552d039f4117" (UID: "f10193a8-86e9-44ff-8668-552d039f4117"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.194961 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f10193a8-86e9-44ff-8668-552d039f4117-kube-api-access-zxtsw" (OuterVolumeSpecName: "kube-api-access-zxtsw") pod "f10193a8-86e9-44ff-8668-552d039f4117" (UID: "f10193a8-86e9-44ff-8668-552d039f4117"). InnerVolumeSpecName "kube-api-access-zxtsw". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292586 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-proxy-ca-bundles\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292698 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-config\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292727 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fd093292-646a-4556-85bc-21feed879f17-tmp\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb"
Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292743 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-client-ca\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb"
Jan 30 00:16:15 crc
kubenswrapper[5119]: I0130 00:16:15.292821 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5x2m\" (UniqueName: \"kubernetes.io/projected/fd093292-646a-4556-85bc-21feed879f17-kube-api-access-g5x2m\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292870 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd093292-646a-4556-85bc-21feed879f17-serving-cert\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292961 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f10193a8-86e9-44ff-8668-552d039f4117-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292974 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292984 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zxtsw\" (UniqueName: \"kubernetes.io/projected/f10193a8-86e9-44ff-8668-552d039f4117-kube-api-access-zxtsw\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.292993 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.293026 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f10193a8-86e9-44ff-8668-552d039f4117-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.293035 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f10193a8-86e9-44ff-8668-552d039f4117-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.353144 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"] Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.394457 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd093292-646a-4556-85bc-21feed879f17-serving-cert\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.394507 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-proxy-ca-bundles\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.394546 5119 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-config\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.394562 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fd093292-646a-4556-85bc-21feed879f17-tmp\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.394576 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-client-ca\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.394628 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-g5x2m\" (UniqueName: \"kubernetes.io/projected/fd093292-646a-4556-85bc-21feed879f17-kube-api-access-g5x2m\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.395824 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fd093292-646a-4556-85bc-21feed879f17-tmp\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.396106 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-proxy-ca-bundles\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.396140 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-config\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.396493 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-client-ca\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.399718 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd093292-646a-4556-85bc-21feed879f17-serving-cert\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " 
pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.411008 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5x2m\" (UniqueName: \"kubernetes.io/projected/fd093292-646a-4556-85bc-21feed879f17-kube-api-access-g5x2m\") pod \"controller-manager-77f59b7476-p79kb\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.444015 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.557184 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" event={"ID":"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6","Type":"ContainerStarted","Data":"a147e552c58d5f8178c860e90890f6c5557ac12785c6311281f1d457f6cbcb19"} Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.557236 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" event={"ID":"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6","Type":"ContainerStarted","Data":"7ba603b218173e8c8a89b2592cb03a14b6d430cc5ca3d4067bc918338953921e"} Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.559190 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.562935 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" event={"ID":"19fdb149-a6fa-4242-bfa0-2e0959c0446f","Type":"ContainerDied","Data":"0ecb616f67b36bc79754d72fbf7ce31c8ac2195ca6fa0cd9353d9637d792aa7d"} Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.562987 5119 scope.go:117] "RemoveContainer" containerID="df02b104dd4edb6fe502c013762fa21192a8ea514294791146b2eaf11eb41158" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.563244 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.578082 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.578494 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7765df77bd-6k425" event={"ID":"f10193a8-86e9-44ff-8668-552d039f4117","Type":"ContainerDied","Data":"75aa5b45ef0e39120046c1f2f70f3eb06f02890d9209c940b436102d1ce2f474"} Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.588430 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" podStartSLOduration=1.5884119540000001 podStartE2EDuration="1.588411954s" podCreationTimestamp="2026-01-30 00:16:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:15.580464641 +0000 UTC m=+379.594527100" watchObservedRunningTime="2026-01-30 00:16:15.588411954 +0000 UTC m=+379.602474413" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.597676 5119 scope.go:117] "RemoveContainer" containerID="6d066f02525a98e2bbf70a492f8d73366e372072bd67b0bdfb7cc43b2e2788db" Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.606200 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"] Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.616607 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6778fffc64-x2fr6"] Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.624262 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7765df77bd-6k425"] Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.628433 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7765df77bd-6k425"] Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.847224 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77f59b7476-p79kb"] Jan 30 00:16:15 crc kubenswrapper[5119]: I0130 00:16:15.877381 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.587095 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" event={"ID":"fd093292-646a-4556-85bc-21feed879f17","Type":"ContainerStarted","Data":"d2eb74dd67e2fd07748129fe235b13af14bab2c3d3a1ebc6bec44cf15b7d0af9"} Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.587165 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.587180 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" event={"ID":"fd093292-646a-4556-85bc-21feed879f17","Type":"ContainerStarted","Data":"6a67c480f0f14b23e153005286097db6a92716a42d2e2ed906dbc789ee0822af"} Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.606823 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" podStartSLOduration=2.606806153 
podStartE2EDuration="2.606806153s" podCreationTimestamp="2026-01-30 00:16:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:16.603828861 +0000 UTC m=+380.617891320" watchObservedRunningTime="2026-01-30 00:16:16.606806153 +0000 UTC m=+380.620868612" Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.754876 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19fdb149-a6fa-4242-bfa0-2e0959c0446f" path="/var/lib/kubelet/pods/19fdb149-a6fa-4242-bfa0-2e0959c0446f/volumes" Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.755530 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f10193a8-86e9-44ff-8668-552d039f4117" path="/var/lib/kubelet/pods/f10193a8-86e9-44ff-8668-552d039f4117/volumes" Jan 30 00:16:16 crc kubenswrapper[5119]: I0130 00:16:16.962994 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:24 crc kubenswrapper[5119]: I0130 00:16:24.371236 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:16:24 crc kubenswrapper[5119]: I0130 00:16:24.371822 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:16:31 crc kubenswrapper[5119]: I0130 00:16:31.966050 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" podUID="6e32c931-da87-4115-8257-185ed217e76a" containerName="oauth-openshift" containerID="cri-o://fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00" gracePeriod=15 Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.502044 5119 util.go:48] "No ready sandbox for pod can be found. 
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.533513 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"]
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534328 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-router-certs\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534376 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-login\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534451 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-idp-0-file-data\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534482 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e32c931-da87-4115-8257-185ed217e76a-audit-dir\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534546 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-session\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534587 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-audit-policies\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534609 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-service-ca\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534626 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-ocp-branding-template\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534645 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-serving-cert\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534659 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-cliconfig\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534675 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-trusted-ca-bundle\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534690 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dxxx\" (UniqueName: \"kubernetes.io/projected/6e32c931-da87-4115-8257-185ed217e76a-kube-api-access-8dxxx\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534710 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-provider-selection\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.534778 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-error\") pod \"6e32c931-da87-4115-8257-185ed217e76a\" (UID: \"6e32c931-da87-4115-8257-185ed217e76a\") "
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.536736 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.536927 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.537214 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6e32c931-da87-4115-8257-185ed217e76a-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.537316 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.537453 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.537488 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6e32c931-da87-4115-8257-185ed217e76a" containerName="oauth-openshift"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.537500 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e32c931-da87-4115-8257-185ed217e76a" containerName="oauth-openshift"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.537621 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="6e32c931-da87-4115-8257-185ed217e76a" containerName="oauth-openshift"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.553085 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.553270 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.553572 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.553842 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.555848 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.556064 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.556748 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.558169 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.559356 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e32c931-da87-4115-8257-185ed217e76a-kube-api-access-8dxxx" (OuterVolumeSpecName: "kube-api-access-8dxxx") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "kube-api-access-8dxxx". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.560149 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"]
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.568162 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "6e32c931-da87-4115-8257-185ed217e76a" (UID: "6e32c931-da87-4115-8257-185ed217e76a"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636072 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636131 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636163 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636183 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636217 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-router-certs\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636238 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-session\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636264 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-audit-policies\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636293 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/68d5c84b-1571-4061-ac60-fa85f407de93-audit-dir\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636310 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636335 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-login\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636352 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-error\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636386 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636423 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-service-ca\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636452 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv8zc\" (UniqueName: \"kubernetes.io/projected/68d5c84b-1571-4061-ac60-fa85f407de93-kube-api-access-xv8zc\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636503 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636519 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636531 5119 reconciler_common.go:299] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e32c931-da87-4115-8257-185ed217e76a-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636542 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636552 5119 reconciler_common.go:299] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636562 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636574 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636585 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636733 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636837 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636857 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8dxxx\" (UniqueName: \"kubernetes.io/projected/6e32c931-da87-4115-8257-185ed217e76a-kube-api-access-8dxxx\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636880 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636896 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.636913 5119 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e32c931-da87-4115-8257-185ed217e76a-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.669086 5119 generic.go:358] "Generic (PLEG): container finished" podID="6e32c931-da87-4115-8257-185ed217e76a" containerID="fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00" exitCode=0
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.669155 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" event={"ID":"6e32c931-da87-4115-8257-185ed217e76a","Type":"ContainerDied","Data":"fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00"}
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.669185 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.669202 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-w67fs" event={"ID":"6e32c931-da87-4115-8257-185ed217e76a","Type":"ContainerDied","Data":"fe0907d28bfe4b6044528c5dac77d6d76289eec13cc0aad81e416d0aae0b7485"}
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.669225 5119 scope.go:117] "RemoveContainer" containerID="fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.689139 5119 scope.go:117] "RemoveContainer" containerID="fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00"
Jan 30 00:16:32 crc kubenswrapper[5119]: E0130 00:16:32.690232 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00\": container with ID starting with fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00 not found: ID does not exist" containerID="fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.690340 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00"} err="failed to get container status \"fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00\": rpc error: code = NotFound desc = could not find container \"fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00\": container with ID starting with fbb4d4364bd7dd265cfbc3926916ddea7312f5ecbb9e65dfcf50832842a2ba00 not found: ID does not exist"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.701159 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-w67fs"]
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.704254 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-w67fs"]
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737820 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737856 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737887 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737909 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737939 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-router-certs\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737960 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-session\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.737984 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-audit-policies\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738013 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/68d5c84b-1571-4061-ac60-fa85f407de93-audit-dir\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738060 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/68d5c84b-1571-4061-ac60-fa85f407de93-audit-dir\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738221 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738281 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-login\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738317 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-error\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738383 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738441 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-service-ca\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.738480 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xv8zc\" (UniqueName: \"kubernetes.io/projected/68d5c84b-1571-4061-ac60-fa85f407de93-kube-api-access-xv8zc\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.739662 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-service-ca\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.739652 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-audit-policies\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.740109 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.740376 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.742293 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.742428 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-session\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.742521 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-router-certs\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.743047 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.743167 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-error\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.743599 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.743642 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-login\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.746572 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/68d5c84b-1571-4061-ac60-fa85f407de93-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.755405 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e32c931-da87-4115-8257-185ed217e76a" path="/var/lib/kubelet/pods/6e32c931-da87-4115-8257-185ed217e76a/volumes"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.757777 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv8zc\" (UniqueName: \"kubernetes.io/projected/68d5c84b-1571-4061-ac60-fa85f407de93-kube-api-access-xv8zc\") pod \"oauth-openshift-5c9bff69bc-s97wp\" (UID: \"68d5c84b-1571-4061-ac60-fa85f407de93\") " pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:32 crc kubenswrapper[5119]: I0130 00:16:32.898182 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:33 crc kubenswrapper[5119]: I0130 00:16:33.314200 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"]
Jan 30 00:16:33 crc kubenswrapper[5119]: I0130 00:16:33.675973 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp" event={"ID":"68d5c84b-1571-4061-ac60-fa85f407de93","Type":"ContainerStarted","Data":"e8e413a9a064e7404037b50b4e1928dde8503ba0d4075940ef974d3f45abfba2"}
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.316917 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77f59b7476-p79kb"]
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.317161 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" podUID="fd093292-646a-4556-85bc-21feed879f17" containerName="controller-manager" containerID="cri-o://d2eb74dd67e2fd07748129fe235b13af14bab2c3d3a1ebc6bec44cf15b7d0af9" gracePeriod=30
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.335951 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"]
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.336201 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" podUID="e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" containerName="route-controller-manager" containerID="cri-o://a147e552c58d5f8178c860e90890f6c5557ac12785c6311281f1d457f6cbcb19" gracePeriod=30
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.683594 5119 generic.go:358] "Generic (PLEG): container finished" podID="fd093292-646a-4556-85bc-21feed879f17" containerID="d2eb74dd67e2fd07748129fe235b13af14bab2c3d3a1ebc6bec44cf15b7d0af9" exitCode=0
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.683663 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" event={"ID":"fd093292-646a-4556-85bc-21feed879f17","Type":"ContainerDied","Data":"d2eb74dd67e2fd07748129fe235b13af14bab2c3d3a1ebc6bec44cf15b7d0af9"}
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.685199 5119 generic.go:358] "Generic (PLEG): container finished" podID="e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" containerID="a147e552c58d5f8178c860e90890f6c5557ac12785c6311281f1d457f6cbcb19" exitCode=0
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.685253 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" event={"ID":"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6","Type":"ContainerDied","Data":"a147e552c58d5f8178c860e90890f6c5557ac12785c6311281f1d457f6cbcb19"}
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.686818 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp" event={"ID":"68d5c84b-1571-4061-ac60-fa85f407de93","Type":"ContainerStarted","Data":"f8c0acc550225f050c6e46ec88f244ee3a29147802547d1e4539c4ce02a4778e"}
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.687038 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.696339 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp"
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.731190 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5c9bff69bc-s97wp" podStartSLOduration=28.731170155 podStartE2EDuration="28.731170155s" podCreationTimestamp="2026-01-30 00:16:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:34.707971098 +0000 UTC m=+398.722033557" watchObservedRunningTime="2026-01-30 00:16:34.731170155 +0000 UTC m=+398.745232614"
Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.850038 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.889306 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr"] Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.901161 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" containerName="route-controller-manager" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.901205 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" containerName="route-controller-manager" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.901339 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" containerName="route-controller-manager" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.973809 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-serving-cert\") pod \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.973873 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-tmp\") pod \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.973918 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-client-ca\") pod \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.973957 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dm8f\" (UniqueName: \"kubernetes.io/projected/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-kube-api-access-2dm8f\") pod \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.973987 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-config\") pod \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\" (UID: \"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6\") " Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.974967 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-config" (OuterVolumeSpecName: "config") pod "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" (UID: "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.976021 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-client-ca" (OuterVolumeSpecName: "client-ca") pod "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" (UID: "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.976232 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-tmp" (OuterVolumeSpecName: "tmp") pod "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" (UID: "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.985579 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" (UID: "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:16:34 crc kubenswrapper[5119]: I0130 00:16:34.985722 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-kube-api-access-2dm8f" (OuterVolumeSpecName: "kube-api-access-2dm8f") pod "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" (UID: "e30b82a2-8004-44b0-a7c4-b91c66bfd4b6"). InnerVolumeSpecName "kube-api-access-2dm8f". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.075099 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.075371 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.075382 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.075409 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-2dm8f\" (UniqueName: \"kubernetes.io/projected/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-kube-api-access-2dm8f\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.075421 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.085892 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr"] Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.086040 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.182541 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-config\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.182723 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-client-ca\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.182761 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2plq6\" (UniqueName: \"kubernetes.io/projected/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-kube-api-access-2plq6\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.182811 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-serving-cert\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.182892 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-tmp\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.216304 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.245439 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb"] Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.246220 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="fd093292-646a-4556-85bc-21feed879f17" containerName="controller-manager" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.246246 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd093292-646a-4556-85bc-21feed879f17" containerName="controller-manager" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.246426 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="fd093292-646a-4556-85bc-21feed879f17" containerName="controller-manager" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284400 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-config\") pod \"fd093292-646a-4556-85bc-21feed879f17\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284589 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-proxy-ca-bundles\") pod \"fd093292-646a-4556-85bc-21feed879f17\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284644 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd093292-646a-4556-85bc-21feed879f17-serving-cert\") pod \"fd093292-646a-4556-85bc-21feed879f17\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284676 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fd093292-646a-4556-85bc-21feed879f17-tmp\") pod \"fd093292-646a-4556-85bc-21feed879f17\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284694 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-client-ca\") pod \"fd093292-646a-4556-85bc-21feed879f17\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284737 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5x2m\" (UniqueName: \"kubernetes.io/projected/fd093292-646a-4556-85bc-21feed879f17-kube-api-access-g5x2m\") pod \"fd093292-646a-4556-85bc-21feed879f17\" (UID: \"fd093292-646a-4556-85bc-21feed879f17\") " Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284832 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-tmp\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284894 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-config\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284936 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-client-ca\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284960 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2plq6\" (UniqueName: \"kubernetes.io/projected/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-kube-api-access-2plq6\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.284984 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-serving-cert\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.285143 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "fd093292-646a-4556-85bc-21feed879f17" (UID: "fd093292-646a-4556-85bc-21feed879f17"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.285213 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-config" (OuterVolumeSpecName: "config") pod "fd093292-646a-4556-85bc-21feed879f17" (UID: "fd093292-646a-4556-85bc-21feed879f17"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.285600 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-tmp\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.285784 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd093292-646a-4556-85bc-21feed879f17-tmp" (OuterVolumeSpecName: "tmp") pod "fd093292-646a-4556-85bc-21feed879f17" (UID: "fd093292-646a-4556-85bc-21feed879f17"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.286002 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-client-ca" (OuterVolumeSpecName: "client-ca") pod "fd093292-646a-4556-85bc-21feed879f17" (UID: "fd093292-646a-4556-85bc-21feed879f17"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.286161 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-client-ca\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.286790 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-config\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.289209 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd093292-646a-4556-85bc-21feed879f17-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fd093292-646a-4556-85bc-21feed879f17" (UID: "fd093292-646a-4556-85bc-21feed879f17"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.289335 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd093292-646a-4556-85bc-21feed879f17-kube-api-access-g5x2m" (OuterVolumeSpecName: "kube-api-access-g5x2m") pod "fd093292-646a-4556-85bc-21feed879f17" (UID: "fd093292-646a-4556-85bc-21feed879f17"). InnerVolumeSpecName "kube-api-access-g5x2m". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.293358 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-serving-cert\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.301671 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2plq6\" (UniqueName: \"kubernetes.io/projected/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-kube-api-access-2plq6\") pod \"route-controller-manager-7478f956d9-cc8rr\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.310356 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb"] Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.310510 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386028 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-client-ca\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386073 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-tmp\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386100 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbnnh\" (UniqueName: \"kubernetes.io/projected/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-kube-api-access-nbnnh\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386133 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-proxy-ca-bundles\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386171 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-serving-cert\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386203 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-config\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386244 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386255 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd093292-646a-4556-85bc-21feed879f17-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386263 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fd093292-646a-4556-85bc-21feed879f17-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386271 5119 
reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386279 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-g5x2m\" (UniqueName: \"kubernetes.io/projected/fd093292-646a-4556-85bc-21feed879f17-kube-api-access-g5x2m\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.386288 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd093292-646a-4556-85bc-21feed879f17-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.400961 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.523177 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-serving-cert\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.523246 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-config\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.523288 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-client-ca\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.523343 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-tmp\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.523540 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nbnnh\" (UniqueName: \"kubernetes.io/projected/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-kube-api-access-nbnnh\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.523711 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-proxy-ca-bundles\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.524053 5119 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-tmp\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.524626 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-client-ca\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.524745 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-proxy-ca-bundles\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.524923 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-config\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.527900 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-serving-cert\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.562190 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbnnh\" (UniqueName: \"kubernetes.io/projected/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-kube-api-access-nbnnh\") pod \"controller-manager-5f985bd6f7-wr9zb\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.623020 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.660810 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr"] Jan 30 00:16:35 crc kubenswrapper[5119]: W0130 00:16:35.674212 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod558799fb_a3a2_4f28_8a9c_fd6f67d47acd.slice/crio-e72b574cec57d0a037e1f69f56443d721aa82b624d7ae40a9e488f66a0bb39d5 WatchSource:0}: Error finding container e72b574cec57d0a037e1f69f56443d721aa82b624d7ae40a9e488f66a0bb39d5: Status 404 returned error can't find the container with id e72b574cec57d0a037e1f69f56443d721aa82b624d7ae40a9e488f66a0bb39d5 Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.695558 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.695595 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb" event={"ID":"e30b82a2-8004-44b0-a7c4-b91c66bfd4b6","Type":"ContainerDied","Data":"7ba603b218173e8c8a89b2592cb03a14b6d430cc5ca3d4067bc918338953921e"} Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.695695 5119 scope.go:117] "RemoveContainer" containerID="a147e552c58d5f8178c860e90890f6c5557ac12785c6311281f1d457f6cbcb19" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.699948 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" event={"ID":"fd093292-646a-4556-85bc-21feed879f17","Type":"ContainerDied","Data":"6a67c480f0f14b23e153005286097db6a92716a42d2e2ed906dbc789ee0822af"} Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.700072 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77f59b7476-p79kb" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.718145 5119 scope.go:117] "RemoveContainer" containerID="d2eb74dd67e2fd07748129fe235b13af14bab2c3d3a1ebc6bec44cf15b7d0af9" Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.718288 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" event={"ID":"558799fb-a3a2-4f28-8a9c-fd6f67d47acd","Type":"ContainerStarted","Data":"e72b574cec57d0a037e1f69f56443d721aa82b624d7ae40a9e488f66a0bb39d5"} Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.729810 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"] Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.733517 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-744948db7b-v2lzb"] Jan 30 00:16:35 crc kubenswrapper[5119]: W0130 00:16:35.801798 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb4f9d62_f99a_4bca_b7db_c009c93cfdbe.slice/crio-ffe95ea218ab42f21c6b7950755469f1b8deb045eddf6c9768bd0298c4487d99 WatchSource:0}: Error finding container ffe95ea218ab42f21c6b7950755469f1b8deb045eddf6c9768bd0298c4487d99: Status 404 returned error can't find the container with id ffe95ea218ab42f21c6b7950755469f1b8deb045eddf6c9768bd0298c4487d99 Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.845846 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77f59b7476-p79kb"] Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.845895 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-77f59b7476-p79kb"] Jan 30 00:16:35 crc kubenswrapper[5119]: I0130 00:16:35.938736 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb"] Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.730197 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" 
event={"ID":"db4f9d62-f99a-4bca-b7db-c009c93cfdbe","Type":"ContainerStarted","Data":"c854bce92801ca91fee4f0ac1a416a4221601984d87456496cb963dd92c2cad2"} Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.730754 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.730772 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" event={"ID":"db4f9d62-f99a-4bca-b7db-c009c93cfdbe","Type":"ContainerStarted","Data":"ffe95ea218ab42f21c6b7950755469f1b8deb045eddf6c9768bd0298c4487d99"} Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.733680 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" event={"ID":"558799fb-a3a2-4f28-8a9c-fd6f67d47acd","Type":"ContainerStarted","Data":"6a4b2030dd8d86fcf744bb16ff9535ba4242ebfff88605af5224fd7614b5e888"} Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.733827 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.737633 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.738269 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.750865 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" podStartSLOduration=2.750844618 podStartE2EDuration="2.750844618s" podCreationTimestamp="2026-01-30 00:16:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:36.748219754 +0000 UTC m=+400.762282213" watchObservedRunningTime="2026-01-30 00:16:36.750844618 +0000 UTC m=+400.764907077" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.755550 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e30b82a2-8004-44b0-a7c4-b91c66bfd4b6" path="/var/lib/kubelet/pods/e30b82a2-8004-44b0-a7c4-b91c66bfd4b6/volumes" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.756113 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd093292-646a-4556-85bc-21feed879f17" path="/var/lib/kubelet/pods/fd093292-646a-4556-85bc-21feed879f17/volumes" Jan 30 00:16:36 crc kubenswrapper[5119]: I0130 00:16:36.800330 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" podStartSLOduration=2.8003122080000002 podStartE2EDuration="2.800312208s" podCreationTimestamp="2026-01-30 00:16:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:36.789403021 +0000 UTC m=+400.803465480" watchObservedRunningTime="2026-01-30 00:16:36.800312208 +0000 UTC m=+400.814374667" Jan 30 00:16:46 crc kubenswrapper[5119]: I0130 00:16:46.136212 5119 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" 
name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.313025 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb"] Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.317751 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" podUID="db4f9d62-f99a-4bca-b7db-c009c93cfdbe" containerName="controller-manager" containerID="cri-o://c854bce92801ca91fee4f0ac1a416a4221601984d87456496cb963dd92c2cad2" gracePeriod=30 Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.342219 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr"] Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.342485 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" podUID="558799fb-a3a2-4f28-8a9c-fd6f67d47acd" containerName="route-controller-manager" containerID="cri-o://6a4b2030dd8d86fcf744bb16ff9535ba4242ebfff88605af5224fd7614b5e888" gracePeriod=30 Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.371288 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.371695 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.837880 5119 generic.go:358] "Generic (PLEG): container finished" podID="558799fb-a3a2-4f28-8a9c-fd6f67d47acd" containerID="6a4b2030dd8d86fcf744bb16ff9535ba4242ebfff88605af5224fd7614b5e888" exitCode=0 Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.838005 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" event={"ID":"558799fb-a3a2-4f28-8a9c-fd6f67d47acd","Type":"ContainerDied","Data":"6a4b2030dd8d86fcf744bb16ff9535ba4242ebfff88605af5224fd7614b5e888"} Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.838319 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" event={"ID":"558799fb-a3a2-4f28-8a9c-fd6f67d47acd","Type":"ContainerDied","Data":"e72b574cec57d0a037e1f69f56443d721aa82b624d7ae40a9e488f66a0bb39d5"} Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.838338 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e72b574cec57d0a037e1f69f56443d721aa82b624d7ae40a9e488f66a0bb39d5" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.839920 5119 generic.go:358] "Generic (PLEG): container finished" podID="db4f9d62-f99a-4bca-b7db-c009c93cfdbe" containerID="c854bce92801ca91fee4f0ac1a416a4221601984d87456496cb963dd92c2cad2" exitCode=0 Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.840021 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" event={"ID":"db4f9d62-f99a-4bca-b7db-c009c93cfdbe","Type":"ContainerDied","Data":"c854bce92801ca91fee4f0ac1a416a4221601984d87456496cb963dd92c2cad2"} Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.843564 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.878381 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb"] Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.878954 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="558799fb-a3a2-4f28-8a9c-fd6f67d47acd" containerName="route-controller-manager" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.878981 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="558799fb-a3a2-4f28-8a9c-fd6f67d47acd" containerName="route-controller-manager" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.879109 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="558799fb-a3a2-4f28-8a9c-fd6f67d47acd" containerName="route-controller-manager" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.888251 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.900769 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb"] Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.976666 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-config\") pod \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.976756 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-client-ca\") pod \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.976860 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2plq6\" (UniqueName: \"kubernetes.io/projected/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-kube-api-access-2plq6\") pod \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.976971 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-serving-cert\") pod \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.977003 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-tmp\") pod \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\" (UID: \"558799fb-a3a2-4f28-8a9c-fd6f67d47acd\") " Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.977606 5119 operation_generator.go:781] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-config" (OuterVolumeSpecName: "config") pod "558799fb-a3a2-4f28-8a9c-fd6f67d47acd" (UID: "558799fb-a3a2-4f28-8a9c-fd6f67d47acd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.977636 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-tmp" (OuterVolumeSpecName: "tmp") pod "558799fb-a3a2-4f28-8a9c-fd6f67d47acd" (UID: "558799fb-a3a2-4f28-8a9c-fd6f67d47acd"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.977732 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-client-ca" (OuterVolumeSpecName: "client-ca") pod "558799fb-a3a2-4f28-8a9c-fd6f67d47acd" (UID: "558799fb-a3a2-4f28-8a9c-fd6f67d47acd"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.986533 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "558799fb-a3a2-4f28-8a9c-fd6f67d47acd" (UID: "558799fb-a3a2-4f28-8a9c-fd6f67d47acd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:16:54 crc kubenswrapper[5119]: I0130 00:16:54.997589 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-kube-api-access-2plq6" (OuterVolumeSpecName: "kube-api-access-2plq6") pod "558799fb-a3a2-4f28-8a9c-fd6f67d47acd" (UID: "558799fb-a3a2-4f28-8a9c-fd6f67d47acd"). InnerVolumeSpecName "kube-api-access-2plq6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.070809 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.078414 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-config\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.078470 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02044f8a-bf33-4525-b335-6ae4d67ce350-serving-cert\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.078765 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gjcs\" (UniqueName: \"kubernetes.io/projected/02044f8a-bf33-4525-b335-6ae4d67ce350-kube-api-access-9gjcs\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.078854 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-client-ca\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.078990 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/02044f8a-bf33-4525-b335-6ae4d67ce350-tmp\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.079102 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.079120 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.079131 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-2plq6\" (UniqueName: \"kubernetes.io/projected/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-kube-api-access-2plq6\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.079145 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.079153 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: 
\"kubernetes.io/empty-dir/558799fb-a3a2-4f28-8a9c-fd6f67d47acd-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.099167 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.099734 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="db4f9d62-f99a-4bca-b7db-c009c93cfdbe" containerName="controller-manager" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.099749 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="db4f9d62-f99a-4bca-b7db-c009c93cfdbe" containerName="controller-manager" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.099841 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="db4f9d62-f99a-4bca-b7db-c009c93cfdbe" containerName="controller-manager" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.107817 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.118440 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180054 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbnnh\" (UniqueName: \"kubernetes.io/projected/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-kube-api-access-nbnnh\") pod \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180127 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-config\") pod \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180179 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-proxy-ca-bundles\") pod \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180197 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-serving-cert\") pod \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180258 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-client-ca\") pod \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180275 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-tmp\") pod \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\" (UID: \"db4f9d62-f99a-4bca-b7db-c009c93cfdbe\") " Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180457 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" 
(UniqueName: \"kubernetes.io/empty-dir/02044f8a-bf33-4525-b335-6ae4d67ce350-tmp\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180491 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-config\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180509 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02044f8a-bf33-4525-b335-6ae4d67ce350-serving-cert\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180575 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9gjcs\" (UniqueName: \"kubernetes.io/projected/02044f8a-bf33-4525-b335-6ae4d67ce350-kube-api-access-9gjcs\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180601 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-client-ca\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180831 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-tmp" (OuterVolumeSpecName: "tmp") pod "db4f9d62-f99a-4bca-b7db-c009c93cfdbe" (UID: "db4f9d62-f99a-4bca-b7db-c009c93cfdbe"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.180919 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/02044f8a-bf33-4525-b335-6ae4d67ce350-tmp\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.181360 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-client-ca" (OuterVolumeSpecName: "client-ca") pod "db4f9d62-f99a-4bca-b7db-c009c93cfdbe" (UID: "db4f9d62-f99a-4bca-b7db-c009c93cfdbe"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.181373 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "db4f9d62-f99a-4bca-b7db-c009c93cfdbe" (UID: "db4f9d62-f99a-4bca-b7db-c009c93cfdbe"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.181599 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-client-ca\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.181669 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-config" (OuterVolumeSpecName: "config") pod "db4f9d62-f99a-4bca-b7db-c009c93cfdbe" (UID: "db4f9d62-f99a-4bca-b7db-c009c93cfdbe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.181764 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-config\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.184760 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-kube-api-access-nbnnh" (OuterVolumeSpecName: "kube-api-access-nbnnh") pod "db4f9d62-f99a-4bca-b7db-c009c93cfdbe" (UID: "db4f9d62-f99a-4bca-b7db-c009c93cfdbe"). InnerVolumeSpecName "kube-api-access-nbnnh". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.184925 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "db4f9d62-f99a-4bca-b7db-c009c93cfdbe" (UID: "db4f9d62-f99a-4bca-b7db-c009c93cfdbe"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.185679 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02044f8a-bf33-4525-b335-6ae4d67ce350-serving-cert\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.198001 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gjcs\" (UniqueName: \"kubernetes.io/projected/02044f8a-bf33-4525-b335-6ae4d67ce350-kube-api-access-9gjcs\") pod \"route-controller-manager-78b7754959-kd4pb\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.201666 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.282429 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56f52cd3-d550-4724-8c03-bc908a45648a-serving-cert\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.282771 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-proxy-ca-bundles\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.282892 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-client-ca\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283002 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq4x7\" (UniqueName: \"kubernetes.io/projected/56f52cd3-d550-4724-8c03-bc908a45648a-kube-api-access-vq4x7\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283132 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/56f52cd3-d550-4724-8c03-bc908a45648a-tmp\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283234 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-config\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283629 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-nbnnh\" (UniqueName: \"kubernetes.io/projected/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-kube-api-access-nbnnh\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283676 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283692 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283704 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283719 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.283727 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/db4f9d62-f99a-4bca-b7db-c009c93cfdbe-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.385661 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56f52cd3-d550-4724-8c03-bc908a45648a-serving-cert\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.385910 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-proxy-ca-bundles\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.386021 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-client-ca\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.386050 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vq4x7\" (UniqueName: \"kubernetes.io/projected/56f52cd3-d550-4724-8c03-bc908a45648a-kube-api-access-vq4x7\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 
00:16:55.386205 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/56f52cd3-d550-4724-8c03-bc908a45648a-tmp\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.386256 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-config\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.387230 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-client-ca\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.390705 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/56f52cd3-d550-4724-8c03-bc908a45648a-tmp\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.391085 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56f52cd3-d550-4724-8c03-bc908a45648a-serving-cert\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.391363 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-proxy-ca-bundles\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.391451 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-config\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.406190 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq4x7\" (UniqueName: \"kubernetes.io/projected/56f52cd3-d550-4724-8c03-bc908a45648a-kube-api-access-vq4x7\") pod \"controller-manager-7d4657cfcf-zzkxk\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.433055 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.647116 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.689400 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk"] Jan 30 00:16:55 crc kubenswrapper[5119]: W0130 00:16:55.693454 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56f52cd3_d550_4724_8c03_bc908a45648a.slice/crio-955627af2428fd183ef9e4887ea785c74228bcfadde8af32eecad95e8b42a2a1 WatchSource:0}: Error finding container 955627af2428fd183ef9e4887ea785c74228bcfadde8af32eecad95e8b42a2a1: Status 404 returned error can't find the container with id 955627af2428fd183ef9e4887ea785c74228bcfadde8af32eecad95e8b42a2a1 Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.784345 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2jg89"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.784917 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2jg89" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="registry-server" containerID="cri-o://08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6" gracePeriod=30 Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.792223 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jqrl2"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.792610 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jqrl2" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="registry-server" containerID="cri-o://a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604" gracePeriod=30 Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.824306 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-xmw98"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.827496 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator" containerID="cri-o://7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d" gracePeriod=30 Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.835412 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xrxfc"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.835930 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xrxfc" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="registry-server" containerID="cri-o://44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283" gracePeriod=30 Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.859993 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-2mvgv"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.866584 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-889qp"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.866649 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" event={"ID":"56f52cd3-d550-4724-8c03-bc908a45648a","Type":"ContainerStarted","Data":"955627af2428fd183ef9e4887ea785c74228bcfadde8af32eecad95e8b42a2a1"} Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.867010 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-889qp" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="registry-server" containerID="cri-o://8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023" gracePeriod=30 Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.867179 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.868360 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" event={"ID":"02044f8a-bf33-4525-b335-6ae4d67ce350","Type":"ContainerStarted","Data":"a44a3fc7f7e7669d35284436ccad84e29d4e777432dd4248755ad252e60ce520"} Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.868545 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-2mvgv"] Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.875517 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.875614 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" event={"ID":"db4f9d62-f99a-4bca-b7db-c009c93cfdbe","Type":"ContainerDied","Data":"ffe95ea218ab42f21c6b7950755469f1b8deb045eddf6c9768bd0298c4487d99"} Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.880127 5119 scope.go:117] "RemoveContainer" containerID="c854bce92801ca91fee4f0ac1a416a4221601984d87456496cb963dd92c2cad2" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.875659 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.993868 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-tmp\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.993954 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.994033 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:55 crc kubenswrapper[5119]: I0130 00:16:55.994061 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48fbw\" (UniqueName: \"kubernetes.io/projected/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-kube-api-access-48fbw\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.094816 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.098499 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.098695 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.098757 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-48fbw\" (UniqueName: \"kubernetes.io/projected/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-kube-api-access-48fbw\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " 
pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.098821 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-tmp\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.099207 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-tmp\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.108175 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.116538 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb"] Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.119742 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-48fbw\" (UniqueName: \"kubernetes.io/projected/5ca130d6-3d7a-4f6a-8ba5-5f0571f62558-kube-api-access-48fbw\") pod \"marketplace-operator-547dbd544d-2mvgv\" (UID: \"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.119866 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5f985bd6f7-wr9zb"] Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.128943 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr"] Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.140298 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7478f956d9-cc8rr"] Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.162433 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.200732 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-utilities\") pod \"88f818c0-b63a-4707-a2ab-49c0e9c58544\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.201253 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-catalog-content\") pod \"88f818c0-b63a-4707-a2ab-49c0e9c58544\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.201312 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwcnq\" (UniqueName: \"kubernetes.io/projected/88f818c0-b63a-4707-a2ab-49c0e9c58544-kube-api-access-vwcnq\") pod \"88f818c0-b63a-4707-a2ab-49c0e9c58544\" (UID: \"88f818c0-b63a-4707-a2ab-49c0e9c58544\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.202638 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-utilities" (OuterVolumeSpecName: "utilities") pod "88f818c0-b63a-4707-a2ab-49c0e9c58544" (UID: "88f818c0-b63a-4707-a2ab-49c0e9c58544"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.207880 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88f818c0-b63a-4707-a2ab-49c0e9c58544-kube-api-access-vwcnq" (OuterVolumeSpecName: "kube-api-access-vwcnq") pod "88f818c0-b63a-4707-a2ab-49c0e9c58544" (UID: "88f818c0-b63a-4707-a2ab-49c0e9c58544"). InnerVolumeSpecName "kube-api-access-vwcnq". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.287795 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88f818c0-b63a-4707-a2ab-49c0e9c58544" (UID: "88f818c0-b63a-4707-a2ab-49c0e9c58544"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.302572 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.302607 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-vwcnq\" (UniqueName: \"kubernetes.io/projected/88f818c0-b63a-4707-a2ab-49c0e9c58544-kube-api-access-vwcnq\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.302621 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f818c0-b63a-4707-a2ab-49c0e9c58544-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.308459 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.332038 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.348038 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.378083 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403031 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlbrs\" (UniqueName: \"kubernetes.io/projected/e627abc4-228d-4133-8f48-393e979d9826-kube-api-access-xlbrs\") pod \"e627abc4-228d-4133-8f48-393e979d9826\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403121 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e627abc4-228d-4133-8f48-393e979d9826-tmp\") pod \"e627abc4-228d-4133-8f48-393e979d9826\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403163 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-utilities\") pod \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403205 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-catalog-content\") pod \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403222 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-catalog-content\") pod \"b956d3a0-19db-47f7-bc95-b3371c1e7968\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403248 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-catalog-content\") pod \"b5189257-f48b-458c-a523-2c1d73cd3f63\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403275 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dj8j\" (UniqueName: \"kubernetes.io/projected/b956d3a0-19db-47f7-bc95-b3371c1e7968-kube-api-access-7dj8j\") pod \"b956d3a0-19db-47f7-bc95-b3371c1e7968\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403312 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-utilities\") pod \"b956d3a0-19db-47f7-bc95-b3371c1e7968\" (UID: \"b956d3a0-19db-47f7-bc95-b3371c1e7968\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 
00:16:56.403326 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjhv6\" (UniqueName: \"kubernetes.io/projected/b5189257-f48b-458c-a523-2c1d73cd3f63-kube-api-access-zjhv6\") pod \"b5189257-f48b-458c-a523-2c1d73cd3f63\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403366 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbpkl\" (UniqueName: \"kubernetes.io/projected/c0a462d2-1b1f-47e8-9c33-1700f405a90d-kube-api-access-zbpkl\") pod \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\" (UID: \"c0a462d2-1b1f-47e8-9c33-1700f405a90d\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403384 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-utilities\") pod \"b5189257-f48b-458c-a523-2c1d73cd3f63\" (UID: \"b5189257-f48b-458c-a523-2c1d73cd3f63\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403438 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e627abc4-228d-4133-8f48-393e979d9826-marketplace-trusted-ca\") pod \"e627abc4-228d-4133-8f48-393e979d9826\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403472 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e627abc4-228d-4133-8f48-393e979d9826-marketplace-operator-metrics\") pod \"e627abc4-228d-4133-8f48-393e979d9826\" (UID: \"e627abc4-228d-4133-8f48-393e979d9826\") " Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403525 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e627abc4-228d-4133-8f48-393e979d9826-tmp" (OuterVolumeSpecName: "tmp") pod "e627abc4-228d-4133-8f48-393e979d9826" (UID: "e627abc4-228d-4133-8f48-393e979d9826"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.403685 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e627abc4-228d-4133-8f48-393e979d9826-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.405271 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e627abc4-228d-4133-8f48-393e979d9826-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "e627abc4-228d-4133-8f48-393e979d9826" (UID: "e627abc4-228d-4133-8f48-393e979d9826"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.405902 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-utilities" (OuterVolumeSpecName: "utilities") pod "b956d3a0-19db-47f7-bc95-b3371c1e7968" (UID: "b956d3a0-19db-47f7-bc95-b3371c1e7968"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.405940 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-utilities" (OuterVolumeSpecName: "utilities") pod "c0a462d2-1b1f-47e8-9c33-1700f405a90d" (UID: "c0a462d2-1b1f-47e8-9c33-1700f405a90d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.407841 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b956d3a0-19db-47f7-bc95-b3371c1e7968-kube-api-access-7dj8j" (OuterVolumeSpecName: "kube-api-access-7dj8j") pod "b956d3a0-19db-47f7-bc95-b3371c1e7968" (UID: "b956d3a0-19db-47f7-bc95-b3371c1e7968"). InnerVolumeSpecName "kube-api-access-7dj8j". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.408657 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0a462d2-1b1f-47e8-9c33-1700f405a90d-kube-api-access-zbpkl" (OuterVolumeSpecName: "kube-api-access-zbpkl") pod "c0a462d2-1b1f-47e8-9c33-1700f405a90d" (UID: "c0a462d2-1b1f-47e8-9c33-1700f405a90d"). InnerVolumeSpecName "kube-api-access-zbpkl". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.409735 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e627abc4-228d-4133-8f48-393e979d9826-kube-api-access-xlbrs" (OuterVolumeSpecName: "kube-api-access-xlbrs") pod "e627abc4-228d-4133-8f48-393e979d9826" (UID: "e627abc4-228d-4133-8f48-393e979d9826"). InnerVolumeSpecName "kube-api-access-xlbrs". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.412835 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5189257-f48b-458c-a523-2c1d73cd3f63-kube-api-access-zjhv6" (OuterVolumeSpecName: "kube-api-access-zjhv6") pod "b5189257-f48b-458c-a523-2c1d73cd3f63" (UID: "b5189257-f48b-458c-a523-2c1d73cd3f63"). InnerVolumeSpecName "kube-api-access-zjhv6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.420541 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-utilities" (OuterVolumeSpecName: "utilities") pod "b5189257-f48b-458c-a523-2c1d73cd3f63" (UID: "b5189257-f48b-458c-a523-2c1d73cd3f63"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.421317 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.425477 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e627abc4-228d-4133-8f48-393e979d9826-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "e627abc4-228d-4133-8f48-393e979d9826" (UID: "e627abc4-228d-4133-8f48-393e979d9826"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.434373 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b956d3a0-19db-47f7-bc95-b3371c1e7968" (UID: "b956d3a0-19db-47f7-bc95-b3371c1e7968"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.440917 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0a462d2-1b1f-47e8-9c33-1700f405a90d" (UID: "c0a462d2-1b1f-47e8-9c33-1700f405a90d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506072 5119 reconciler_common.go:299] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e627abc4-228d-4133-8f48-393e979d9826-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506113 5119 reconciler_common.go:299] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e627abc4-228d-4133-8f48-393e979d9826-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506125 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xlbrs\" (UniqueName: \"kubernetes.io/projected/e627abc4-228d-4133-8f48-393e979d9826-kube-api-access-xlbrs\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506136 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506148 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a462d2-1b1f-47e8-9c33-1700f405a90d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506158 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506168 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7dj8j\" (UniqueName: \"kubernetes.io/projected/b956d3a0-19db-47f7-bc95-b3371c1e7968-kube-api-access-7dj8j\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506179 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b956d3a0-19db-47f7-bc95-b3371c1e7968-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506189 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zjhv6\" (UniqueName: \"kubernetes.io/projected/b5189257-f48b-458c-a523-2c1d73cd3f63-kube-api-access-zjhv6\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506200 5119 reconciler_common.go:299] "Volume detached for volume 
\"kube-api-access-zbpkl\" (UniqueName: \"kubernetes.io/projected/c0a462d2-1b1f-47e8-9c33-1700f405a90d-kube-api-access-zbpkl\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506210 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.506934 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5189257-f48b-458c-a523-2c1d73cd3f63" (UID: "b5189257-f48b-458c-a523-2c1d73cd3f63"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.607871 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5189257-f48b-458c-a523-2c1d73cd3f63-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.755601 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="558799fb-a3a2-4f28-8a9c-fd6f67d47acd" path="/var/lib/kubelet/pods/558799fb-a3a2-4f28-8a9c-fd6f67d47acd/volumes" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.756383 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db4f9d62-f99a-4bca-b7db-c009c93cfdbe" path="/var/lib/kubelet/pods/db4f9d62-f99a-4bca-b7db-c009c93cfdbe/volumes" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.883775 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" event={"ID":"56f52cd3-d550-4724-8c03-bc908a45648a","Type":"ContainerStarted","Data":"a4c1afdab4dd88f65a3139334d4d5388dac9f5ade00984b14015178e68190eff"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.884061 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.885483 5119 generic.go:358] "Generic (PLEG): container finished" podID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerID="08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6" exitCode=0 Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.885549 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerDied","Data":"08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.885571 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2jg89" event={"ID":"c0a462d2-1b1f-47e8-9c33-1700f405a90d","Type":"ContainerDied","Data":"07a0629827bad9428219265f2d8638f06de8784946e24cf5a8fd1bb658527aa7"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.885587 5119 scope.go:117] "RemoveContainer" containerID="08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.885683 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2jg89" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.888111 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" event={"ID":"02044f8a-bf33-4525-b335-6ae4d67ce350","Type":"ContainerStarted","Data":"dbe42818d03fc245dda0002ff1b46aae0a2a3f1ba001f6a942a7d3d44e20c9e3"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.888876 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.891904 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.894129 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.895360 5119 generic.go:358] "Generic (PLEG): container finished" podID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerID="44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283" exitCode=0 Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.895442 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerDied","Data":"44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.895463 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xrxfc" event={"ID":"b956d3a0-19db-47f7-bc95-b3371c1e7968","Type":"ContainerDied","Data":"f7f9596f1d8fbbf965e4c760eabcc7200bc168e3f8bd9acc750c7f481ce301c1"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.895469 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xrxfc" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.904215 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" podStartSLOduration=2.904202641 podStartE2EDuration="2.904202641s" podCreationTimestamp="2026-01-30 00:16:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:56.902814657 +0000 UTC m=+420.916877116" watchObservedRunningTime="2026-01-30 00:16:56.904202641 +0000 UTC m=+420.918265090" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.905080 5119 scope.go:117] "RemoveContainer" containerID="d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.905188 5119 generic.go:358] "Generic (PLEG): container finished" podID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerID="8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023" exitCode=0 Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.905272 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerDied","Data":"8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.905296 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-889qp" event={"ID":"b5189257-f48b-458c-a523-2c1d73cd3f63","Type":"ContainerDied","Data":"4b6896bfc64e2f9d00c7528c683d1bbdb7580972278ecaf01de0268fab455564"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.905321 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-889qp" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.914749 5119 generic.go:358] "Generic (PLEG): container finished" podID="e627abc4-228d-4133-8f48-393e979d9826" containerID="7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d" exitCode=0 Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.914804 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" event={"ID":"e627abc4-228d-4133-8f48-393e979d9826","Type":"ContainerDied","Data":"7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.914830 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" event={"ID":"e627abc4-228d-4133-8f48-393e979d9826","Type":"ContainerDied","Data":"81fd8f10a2d32aa052b0316b43938ead36bbe778dae37e2c029d9767023f02d9"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.915302 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-xmw98" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.940375 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-2mvgv"] Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.942236 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" podStartSLOduration=2.94221353 podStartE2EDuration="2.94221353s" podCreationTimestamp="2026-01-30 00:16:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:56.926119577 +0000 UTC m=+420.940182056" watchObservedRunningTime="2026-01-30 00:16:56.94221353 +0000 UTC m=+420.956275979" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.958300 5119 generic.go:358] "Generic (PLEG): container finished" podID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerID="a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604" exitCode=0 Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.958512 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jqrl2" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.958532 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerDied","Data":"a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.958839 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqrl2" event={"ID":"88f818c0-b63a-4707-a2ab-49c0e9c58544","Type":"ContainerDied","Data":"37050a125359e6253497ce3c02c472beafc88140ca48a22fac629dfba78ace59"} Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.992640 5119 scope.go:117] "RemoveContainer" containerID="81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6" Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.994220 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xrxfc"] Jan 30 00:16:56 crc kubenswrapper[5119]: I0130 00:16:56.997596 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xrxfc"] Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.000253 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2jg89"] Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.006935 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2jg89"] Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.016305 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jqrl2"] Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.022429 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jqrl2"] Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.027098 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-889qp"] Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.033742 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-889qp"] Jan 30 00:16:57 crc 
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.042376 5119 scope.go:117] "RemoveContainer" containerID="08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.042822 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6\": container with ID starting with 08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6 not found: ID does not exist" containerID="08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.042862 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6"} err="failed to get container status \"08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6\": rpc error: code = NotFound desc = could not find container \"08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6\": container with ID starting with 08f74f7628afc8d8b28de478e64e0fa3cfa1332c6af329c4e4892550484af6b6 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.042888 5119 scope.go:117] "RemoveContainer" containerID="d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.043310 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2\": container with ID starting with d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2 not found: ID does not exist" containerID="d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.043334 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2"} err="failed to get container status \"d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2\": rpc error: code = NotFound desc = could not find container \"d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2\": container with ID starting with d9ee7ea2821fd84fb74ce465c94520a7f1b005289da5124c4c5854d155d0d9c2 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.043353 5119 scope.go:117] "RemoveContainer" containerID="81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.044045 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6\": container with ID starting with 81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6 not found: ID does not exist" containerID="81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.044075 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6"} err="failed to get container status \"81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6\": rpc error: code = NotFound desc = could not find container \"81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6\": container with ID starting with 81607bfe24d4a0310df2521d09a1ada42bfcdc25ddf0940210c7417ffe074ff6 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.044092 5119 scope.go:117] "RemoveContainer" containerID="44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.049632 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-xmw98"]
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.053637 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-xmw98"]
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.067290 5119 scope.go:117] "RemoveContainer" containerID="1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.079955 5119 scope.go:117] "RemoveContainer" containerID="807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.111824 5119 scope.go:117] "RemoveContainer" containerID="44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.112330 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283\": container with ID starting with 44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283 not found: ID does not exist" containerID="44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.112407 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283"} err="failed to get container status \"44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283\": rpc error: code = NotFound desc = could not find container \"44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283\": container with ID starting with 44478c74ca9bbd8fba83c0ea403cd32b28484dd39c796af993638a5a0348b283 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.112443 5119 scope.go:117] "RemoveContainer" containerID="1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.112973 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f\": container with ID starting with 1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f not found: ID does not exist" containerID="1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.113026 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f"} err="failed to get container status \"1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f\": rpc error: code = NotFound desc = could not find container \"1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f\": container with ID starting with 1076d3bc7c22b343ac278779680641afc7f196784a9b1b666c33fa0f2dd0a49f not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.113058 5119 scope.go:117] "RemoveContainer" containerID="807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.113484 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba\": container with ID starting with 807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba not found: ID does not exist" containerID="807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.113526 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba"} err="failed to get container status \"807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba\": rpc error: code = NotFound desc = could not find container \"807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba\": container with ID starting with 807190a1d1eb47da75ce4f3755b5725e603544661c418720517911190c53fbba not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.113556 5119 scope.go:117] "RemoveContainer" containerID="8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.129014 5119 scope.go:117] "RemoveContainer" containerID="36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.147743 5119 scope.go:117] "RemoveContainer" containerID="13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.167090 5119 scope.go:117] "RemoveContainer" containerID="8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.167864 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023\": container with ID starting with 8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023 not found: ID does not exist" containerID="8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.167953 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023"} err="failed to get container status \"8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023\": rpc error: code = NotFound desc = could not find container \"8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023\": container with ID starting with 8f3623a2eaba1bf998f12a543105c40b036f3425f147718426e450c5cca7f023 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.168005 5119 scope.go:117] "RemoveContainer" containerID="36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.168708 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9\": container with ID starting with 36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9 not found: ID does not exist" containerID="36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.168764 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9"} err="failed to get container status \"36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9\": rpc error: code = NotFound desc = could not find container \"36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9\": container with ID starting with 36c68fd9f02fd8b7daff9207c3721c31066edb7a4cc717a19fa40902a3c558c9 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.168797 5119 scope.go:117] "RemoveContainer" containerID="13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.169163 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc\": container with ID starting with 13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc not found: ID does not exist" containerID="13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.169199 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc"} err="failed to get container status \"13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc\": rpc error: code = NotFound desc = could not find container \"13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc\": container with ID starting with 13df5a7d3b0597995190840aa9105d21936f9f620a19d05fbeb3343b57e621dc not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.169226 5119 scope.go:117] "RemoveContainer" containerID="7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.191807 5119 scope.go:117] "RemoveContainer" containerID="cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.215045 5119 scope.go:117] "RemoveContainer" containerID="7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.215777 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d\": container with ID starting with 7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d not found: ID does not exist" containerID="7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.215860 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d"} err="failed to get container status \"7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d\": rpc error: code = NotFound desc = could not find container \"7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d\": container with ID starting with 7a09c03988e69809652d264d6d0ea65acbe6baeda51cc38f0719bc12ff9b2f3d not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.215913 5119 scope.go:117] "RemoveContainer" containerID="cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.216270 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4\": container with ID starting with cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4 not found: ID does not exist" containerID="cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.216302 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4"} err="failed to get container status \"cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4\": rpc error: code = NotFound desc = could not find container \"cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4\": container with ID starting with cff7047ddb0dc1d31bc7a1d83a517b0f87cbdf92fb925506bf3c2d4b7187f8d4 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.216322 5119 scope.go:117] "RemoveContainer" containerID="a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.233313 5119 scope.go:117] "RemoveContainer" containerID="170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.249954 5119 scope.go:117] "RemoveContainer" containerID="088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.266852 5119 scope.go:117] "RemoveContainer" containerID="a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.267908 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604\": container with ID starting with a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604 not found: ID does not exist" containerID="a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.267950 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604"} err="failed to get container status \"a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604\": rpc error: code = NotFound desc = could not find container \"a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604\": container with ID starting with a52e2712b530ec03b05cd5f8202ef2803c851a69f2dfa094111efe96633f5604 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.267972 5119 scope.go:117] "RemoveContainer" containerID="170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.268250 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408\": container with ID starting with 170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408 not found: ID does not exist" containerID="170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.268277 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408"} err="failed to get container status \"170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408\": rpc error: code = NotFound desc = could not find container \"170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408\": container with ID starting with 170de711980ad0208409fbaa6401828e3706c2e2c6e74c00bd9ed7c848c9a408 not found: ID does not exist"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.268290 5119 scope.go:117] "RemoveContainer" containerID="088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb"
Jan 30 00:16:57 crc kubenswrapper[5119]: E0130 00:16:57.271682 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb\": container with ID starting with 088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb not found: ID does not exist" containerID="088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.271715 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb"} err="failed to get container status \"088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb\": rpc error: code = NotFound desc = could not find container \"088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb\": container with ID starting with 088144ebc51a4da85188741c2f8efb18e4c6b7e35c1d37158dd0073a57c23aeb not found: ID does not exist"
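[editor's note] The paired "ContainerStatus from runtime service failed" / "DeleteContainer returned error" lines above are benign: by the time the kubelet retries RemoveContainer, CRI-O has already deleted the container, so the runtime answers with gRPC NotFound. A hedged Go sketch of that tolerate-NotFound pattern (a hypothetical helper, not the kubelet's actual code):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats a NotFound from the runtime as success:
// the container is already gone, which is the desired end state.
func removeContainer(remove func(id string) error, id string) error {
	if err := remove(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already removed, ignoring\n", id)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	// Simulate a runtime that has already deleted the container.
	gone := func(id string) error {
		return status.Errorf(codes.NotFound, "could not find container %q", id)
	}
	if err := removeContainer(gone, "08f74f76"); err != nil {
		panic(err)
	}
}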
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.795946 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-85cj4"]
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796572 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796585 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796600 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796606 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796620 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796627 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796635 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796641 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796648 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796653 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796666 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796673 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796683 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796689 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796696 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796702 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796712 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796718 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796725 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796731 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796749 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796755 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796762 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796769 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796777 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796783 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="extract-utilities"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796792 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796798 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="extract-content"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796890 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796900 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796909 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796925 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" containerName="registry-server"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796935 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.796944 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="e627abc4-228d-4133-8f48-393e979d9826" containerName="marketplace-operator"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.814142 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.814760 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-85cj4"]
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.817179 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-7cl8d\""
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.834074 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kszr\" (UniqueName: \"kubernetes.io/projected/51c13523-988e-45ad-94d9-effce777308f-kube-api-access-5kszr\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.834194 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51c13523-988e-45ad-94d9-effce777308f-catalog-content\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.834243 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51c13523-988e-45ad-94d9-effce777308f-utilities\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.934748 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51c13523-988e-45ad-94d9-effce777308f-utilities\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.934828 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5kszr\" (UniqueName: \"kubernetes.io/projected/51c13523-988e-45ad-94d9-effce777308f-kube-api-access-5kszr\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.934883 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51c13523-988e-45ad-94d9-effce777308f-catalog-content\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.935332 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51c13523-988e-45ad-94d9-effce777308f-utilities\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.935543 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51c13523-988e-45ad-94d9-effce777308f-catalog-content\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.965664 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kszr\" (UniqueName: \"kubernetes.io/projected/51c13523-988e-45ad-94d9-effce777308f-kube-api-access-5kszr\") pod \"certified-operators-85cj4\" (UID: \"51c13523-988e-45ad-94d9-effce777308f\") " pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.968110 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" event={"ID":"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558","Type":"ContainerStarted","Data":"9e1dc2adc8c4e8b550fc83e0b339ce3d33cea91f70d5f1d4176bfe2e049ce026"}
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.968175 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" event={"ID":"5ca130d6-3d7a-4f6a-8ba5-5f0571f62558","Type":"ContainerStarted","Data":"d16462d8fdae3159495f2c44202c645331710bef2624624253a59380bc9e92f2"}
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.968198 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.973730 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv"
Jan 30 00:16:57 crc kubenswrapper[5119]: I0130 00:16:57.992820 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-547dbd544d-2mvgv" podStartSLOduration=2.992779522 podStartE2EDuration="2.992779522s" podCreationTimestamp="2026-01-30 00:16:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:57.987280458 +0000 UTC m=+422.001342907" watchObservedRunningTime="2026-01-30 00:16:57.992779522 +0000 UTC m=+422.006841981"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.136614 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-85cj4"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.394362 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gdgwt"]
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.405999 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gdgwt"]
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.406155 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.409803 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-marketplace-dockercfg-gg4w7\""
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.439685 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-catalog-content\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.439757 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-utilities\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.439796 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjhlk\" (UniqueName: \"kubernetes.io/projected/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-kube-api-access-gjhlk\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.535678 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-85cj4"]
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.540359 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-utilities\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.540425 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gjhlk\" (UniqueName: \"kubernetes.io/projected/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-kube-api-access-gjhlk\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.540453 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-catalog-content\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.540897 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-utilities\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.540918 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-catalog-content\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: W0130 00:16:58.545032 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51c13523_988e_45ad_94d9_effce777308f.slice/crio-4f4f57383cd046f4544953c6edd6c9f45d4b1658acb8366bd00b8ef89b1c4ad8 WatchSource:0}: Error finding container 4f4f57383cd046f4544953c6edd6c9f45d4b1658acb8366bd00b8ef89b1c4ad8: Status 404 returned error can't find the container with id 4f4f57383cd046f4544953c6edd6c9f45d4b1658acb8366bd00b8ef89b1c4ad8
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.562999 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjhlk\" (UniqueName: \"kubernetes.io/projected/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-kube-api-access-gjhlk\") pod \"redhat-marketplace-gdgwt\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") " pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.727408 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.758869 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88f818c0-b63a-4707-a2ab-49c0e9c58544" path="/var/lib/kubelet/pods/88f818c0-b63a-4707-a2ab-49c0e9c58544/volumes"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.759946 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5189257-f48b-458c-a523-2c1d73cd3f63" path="/var/lib/kubelet/pods/b5189257-f48b-458c-a523-2c1d73cd3f63/volumes"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.760693 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b956d3a0-19db-47f7-bc95-b3371c1e7968" path="/var/lib/kubelet/pods/b956d3a0-19db-47f7-bc95-b3371c1e7968/volumes"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.762044 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0a462d2-1b1f-47e8-9c33-1700f405a90d" path="/var/lib/kubelet/pods/c0a462d2-1b1f-47e8-9c33-1700f405a90d/volumes"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.763073 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e627abc4-228d-4133-8f48-393e979d9826" path="/var/lib/kubelet/pods/e627abc4-228d-4133-8f48-393e979d9826/volumes"
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.977253 5119 generic.go:358] "Generic (PLEG): container finished" podID="51c13523-988e-45ad-94d9-effce777308f" containerID="ed43ebba43c8eb47f7595f0e95f29cb6a835eee1187d19db82ee4b76891e4c98" exitCode=0
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.977425 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-85cj4" event={"ID":"51c13523-988e-45ad-94d9-effce777308f","Type":"ContainerDied","Data":"ed43ebba43c8eb47f7595f0e95f29cb6a835eee1187d19db82ee4b76891e4c98"}
Jan 30 00:16:58 crc kubenswrapper[5119]: I0130 00:16:58.977816 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-85cj4" event={"ID":"51c13523-988e-45ad-94d9-effce777308f","Type":"ContainerStarted","Data":"4f4f57383cd046f4544953c6edd6c9f45d4b1658acb8366bd00b8ef89b1c4ad8"}
Jan 30 00:16:59 crc kubenswrapper[5119]: I0130 00:16:59.158590 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gdgwt"]
Jan 30 00:16:59 crc kubenswrapper[5119]: W0130 00:16:59.163117 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bcf91a5_cf2e_4c76_ba89_d00720fbf424.slice/crio-c98e68227fd8298b713fc22e15dce5b935c431ad7eda7778b121afa9c9e3538d WatchSource:0}: Error finding container c98e68227fd8298b713fc22e15dce5b935c431ad7eda7778b121afa9c9e3538d: Status 404 returned error can't find the container with id c98e68227fd8298b713fc22e15dce5b935c431ad7eda7778b121afa9c9e3538d
Jan 30 00:16:59 crc kubenswrapper[5119]: I0130 00:16:59.985838 5119 generic.go:358] "Generic (PLEG): container finished" podID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerID="d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd" exitCode=0
Jan 30 00:16:59 crc kubenswrapper[5119]: I0130 00:16:59.985923 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerDied","Data":"d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd"}
Jan 30 00:16:59 crc kubenswrapper[5119]: I0130 00:16:59.986436 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerStarted","Data":"c98e68227fd8298b713fc22e15dce5b935c431ad7eda7778b121afa9c9e3538d"}
Jan 30 00:16:59 crc kubenswrapper[5119]: I0130 00:16:59.989140 5119 generic.go:358] "Generic (PLEG): container finished" podID="51c13523-988e-45ad-94d9-effce777308f" containerID="cc25e58a7012552ee102e3ef3adac4c3fb391c528de1482a3337bf0d20400022" exitCode=0
Jan 30 00:16:59 crc kubenswrapper[5119]: I0130 00:16:59.989863 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-85cj4" event={"ID":"51c13523-988e-45ad-94d9-effce777308f","Type":"ContainerDied","Data":"cc25e58a7012552ee102e3ef3adac4c3fb391c528de1482a3337bf0d20400022"}
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.204675 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hlk9w"]
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.215452 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hlk9w"]
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.215797 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.218430 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-9gxlh\""
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.261159 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67405c86-4048-4098-a3e0-12fec1e771df-catalog-content\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.261237 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmdnq\" (UniqueName: \"kubernetes.io/projected/67405c86-4048-4098-a3e0-12fec1e771df-kube-api-access-hmdnq\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.261269 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67405c86-4048-4098-a3e0-12fec1e771df-utilities\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.309126 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"]
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.314426 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.331588 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"]
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362758 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/78091a6f-876d-4629-bd61-980eb399f0eb-installation-pull-secrets\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362799 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/78091a6f-876d-4629-bd61-980eb399f0eb-ca-trust-extracted\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362833 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w68v\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-kube-api-access-7w68v\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362863 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67405c86-4048-4098-a3e0-12fec1e771df-catalog-content\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362907 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-hmdnq\" (UniqueName: \"kubernetes.io/projected/67405c86-4048-4098-a3e0-12fec1e771df-kube-api-access-hmdnq\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362934 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67405c86-4048-4098-a3e0-12fec1e771df-utilities\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362958 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-bound-sa-token\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.362975 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-registry-tls\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.363000 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.363025 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/78091a6f-876d-4629-bd61-980eb399f0eb-registry-certificates\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.363057 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/78091a6f-876d-4629-bd61-980eb399f0eb-trusted-ca\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.363324 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67405c86-4048-4098-a3e0-12fec1e771df-catalog-content\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.363486 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67405c86-4048-4098-a3e0-12fec1e771df-utilities\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.402343 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmdnq\" (UniqueName: \"kubernetes.io/projected/67405c86-4048-4098-a3e0-12fec1e771df-kube-api-access-hmdnq\") pod \"redhat-operators-hlk9w\" (UID: \"67405c86-4048-4098-a3e0-12fec1e771df\") " pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.420810 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.464160 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7w68v\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-kube-api-access-7w68v\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.464475 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-bound-sa-token\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.464748 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-registry-tls\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.464846 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/78091a6f-876d-4629-bd61-980eb399f0eb-registry-certificates\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.464925 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/78091a6f-876d-4629-bd61-980eb399f0eb-trusted-ca\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.464974 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/78091a6f-876d-4629-bd61-980eb399f0eb-installation-pull-secrets\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.465003 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/78091a6f-876d-4629-bd61-980eb399f0eb-ca-trust-extracted\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.465619 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/78091a6f-876d-4629-bd61-980eb399f0eb-ca-trust-extracted\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.466326 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/78091a6f-876d-4629-bd61-980eb399f0eb-trusted-ca\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.466809 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/78091a6f-876d-4629-bd61-980eb399f0eb-registry-certificates\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.469036 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/78091a6f-876d-4629-bd61-980eb399f0eb-installation-pull-secrets\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.469436 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-registry-tls\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.483126 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-bound-sa-token\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.486910 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w68v\" (UniqueName: \"kubernetes.io/projected/78091a6f-876d-4629-bd61-980eb399f0eb-kube-api-access-7w68v\") pod \"image-registry-5d9d95bf5b-v9xt8\" (UID: \"78091a6f-876d-4629-bd61-980eb399f0eb\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.538283 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hlk9w"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.632601 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.762558 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hlk9w"]
Jan 30 00:17:00 crc kubenswrapper[5119]: W0130 00:17:00.779649 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67405c86_4048_4098_a3e0_12fec1e771df.slice/crio-c51297e2a19809e356bec1e6d3ecd3c27c68d2b36321335b462c50c38b47c023 WatchSource:0}: Error finding container c51297e2a19809e356bec1e6d3ecd3c27c68d2b36321335b462c50c38b47c023: Status 404 returned error can't find the container with id c51297e2a19809e356bec1e6d3ecd3c27c68d2b36321335b462c50c38b47c023
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.794706 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wkk9s"]
Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.811361 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wkk9s"]
Need to start a new one" pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.823783 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"community-operators-dockercfg-vrd5f\"" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.869405 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c498e919-938a-4abb-927a-efb274f1c744-utilities\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.869461 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gf2k\" (UniqueName: \"kubernetes.io/projected/c498e919-938a-4abb-927a-efb274f1c744-kube-api-access-4gf2k\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.869481 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c498e919-938a-4abb-927a-efb274f1c744-catalog-content\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.970918 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c498e919-938a-4abb-927a-efb274f1c744-utilities\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.971268 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-4gf2k\" (UniqueName: \"kubernetes.io/projected/c498e919-938a-4abb-927a-efb274f1c744-kube-api-access-4gf2k\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.971308 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c498e919-938a-4abb-927a-efb274f1c744-catalog-content\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.971322 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c498e919-938a-4abb-927a-efb274f1c744-utilities\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:00 crc kubenswrapper[5119]: I0130 00:17:00.971764 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c498e919-938a-4abb-927a-efb274f1c744-catalog-content\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.002689 
5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gf2k\" (UniqueName: \"kubernetes.io/projected/c498e919-938a-4abb-927a-efb274f1c744-kube-api-access-4gf2k\") pod \"community-operators-wkk9s\" (UID: \"c498e919-938a-4abb-927a-efb274f1c744\") " pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.015183 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerStarted","Data":"29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60"} Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.020805 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-85cj4" event={"ID":"51c13523-988e-45ad-94d9-effce777308f","Type":"ContainerStarted","Data":"2f0e2debec4486f21e4b157397d8071208c3c3ac37fbc47765e99e50a833a047"} Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.024494 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlk9w" event={"ID":"67405c86-4048-4098-a3e0-12fec1e771df","Type":"ContainerStarted","Data":"c51297e2a19809e356bec1e6d3ecd3c27c68d2b36321335b462c50c38b47c023"} Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.060564 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-85cj4" podStartSLOduration=3.34812046 podStartE2EDuration="4.060538716s" podCreationTimestamp="2026-01-30 00:16:57 +0000 UTC" firstStartedPulling="2026-01-30 00:16:58.978217181 +0000 UTC m=+422.992279640" lastFinishedPulling="2026-01-30 00:16:59.690635397 +0000 UTC m=+423.704697896" observedRunningTime="2026-01-30 00:17:01.054660902 +0000 UTC m=+425.068723361" watchObservedRunningTime="2026-01-30 00:17:01.060538716 +0000 UTC m=+425.074601175" Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.119828 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-5d9d95bf5b-v9xt8"] Jan 30 00:17:01 crc kubenswrapper[5119]: W0130 00:17:01.129238 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78091a6f_876d_4629_bd61_980eb399f0eb.slice/crio-55c7c49b9a63121db7ec8f206b8ea3df1b84dfc08b3f4aae204da0d10f2c12f0 WatchSource:0}: Error finding container 55c7c49b9a63121db7ec8f206b8ea3df1b84dfc08b3f4aae204da0d10f2c12f0: Status 404 returned error can't find the container with id 55c7c49b9a63121db7ec8f206b8ea3df1b84dfc08b3f4aae204da0d10f2c12f0 Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.184838 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:01 crc kubenswrapper[5119]: I0130 00:17:01.629633 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wkk9s"] Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.034798 5119 generic.go:358] "Generic (PLEG): container finished" podID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerID="29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60" exitCode=0 Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.034894 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerDied","Data":"29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.034937 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerStarted","Data":"a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.036132 5119 generic.go:358] "Generic (PLEG): container finished" podID="c498e919-938a-4abb-927a-efb274f1c744" containerID="665aaccad467970e78ed89dbf7f9d018ebfb1862b4349b14df39232241a10721" exitCode=0 Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.036165 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkk9s" event={"ID":"c498e919-938a-4abb-927a-efb274f1c744","Type":"ContainerDied","Data":"665aaccad467970e78ed89dbf7f9d018ebfb1862b4349b14df39232241a10721"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.036211 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkk9s" event={"ID":"c498e919-938a-4abb-927a-efb274f1c744","Type":"ContainerStarted","Data":"65f4e08ae881be8724bb4928c22f3d4c8a46d322ae8434194c8103d208f26721"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.037896 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8" event={"ID":"78091a6f-876d-4629-bd61-980eb399f0eb","Type":"ContainerStarted","Data":"de187fce3f8b317d1ae6f97f2b88a7af9c2054403547c26c808ed8a0cf25ce7f"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.037929 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8" event={"ID":"78091a6f-876d-4629-bd61-980eb399f0eb","Type":"ContainerStarted","Data":"55c7c49b9a63121db7ec8f206b8ea3df1b84dfc08b3f4aae204da0d10f2c12f0"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.037996 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8" Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.039075 5119 generic.go:358] "Generic (PLEG): container finished" podID="67405c86-4048-4098-a3e0-12fec1e771df" containerID="345cb34d2fe045d8a87353691031bb3ccacfd89c1c8302a46bb1c0c615a3b961" exitCode=0 Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.039218 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlk9w" event={"ID":"67405c86-4048-4098-a3e0-12fec1e771df","Type":"ContainerDied","Data":"345cb34d2fe045d8a87353691031bb3ccacfd89c1c8302a46bb1c0c615a3b961"} Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 
00:17:02.051343 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gdgwt" podStartSLOduration=3.41502038 podStartE2EDuration="4.051326685s" podCreationTimestamp="2026-01-30 00:16:58 +0000 UTC" firstStartedPulling="2026-01-30 00:16:59.986757046 +0000 UTC m=+424.000819505" lastFinishedPulling="2026-01-30 00:17:00.623063351 +0000 UTC m=+424.637125810" observedRunningTime="2026-01-30 00:17:02.051179992 +0000 UTC m=+426.065242451" watchObservedRunningTime="2026-01-30 00:17:02.051326685 +0000 UTC m=+426.065389144" Jan 30 00:17:02 crc kubenswrapper[5119]: I0130 00:17:02.097137 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8" podStartSLOduration=2.097118485 podStartE2EDuration="2.097118485s" podCreationTimestamp="2026-01-30 00:17:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:17:02.094630494 +0000 UTC m=+426.108692983" watchObservedRunningTime="2026-01-30 00:17:02.097118485 +0000 UTC m=+426.111180944" Jan 30 00:17:03 crc kubenswrapper[5119]: I0130 00:17:03.048518 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkk9s" event={"ID":"c498e919-938a-4abb-927a-efb274f1c744","Type":"ContainerStarted","Data":"41b2f5695859c5a66e16bd7b3f443a1cb9caa408376b6effb561ef5d29294a40"} Jan 30 00:17:03 crc kubenswrapper[5119]: I0130 00:17:03.051126 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlk9w" event={"ID":"67405c86-4048-4098-a3e0-12fec1e771df","Type":"ContainerStarted","Data":"206fbb4c55488bdc461f0e37756c604a05ba2c69b56c009a808a8f9d6c244ef9"} Jan 30 00:17:04 crc kubenswrapper[5119]: I0130 00:17:04.059453 5119 generic.go:358] "Generic (PLEG): container finished" podID="67405c86-4048-4098-a3e0-12fec1e771df" containerID="206fbb4c55488bdc461f0e37756c604a05ba2c69b56c009a808a8f9d6c244ef9" exitCode=0 Jan 30 00:17:04 crc kubenswrapper[5119]: I0130 00:17:04.059549 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlk9w" event={"ID":"67405c86-4048-4098-a3e0-12fec1e771df","Type":"ContainerDied","Data":"206fbb4c55488bdc461f0e37756c604a05ba2c69b56c009a808a8f9d6c244ef9"} Jan 30 00:17:04 crc kubenswrapper[5119]: I0130 00:17:04.063889 5119 generic.go:358] "Generic (PLEG): container finished" podID="c498e919-938a-4abb-927a-efb274f1c744" containerID="41b2f5695859c5a66e16bd7b3f443a1cb9caa408376b6effb561ef5d29294a40" exitCode=0 Jan 30 00:17:04 crc kubenswrapper[5119]: I0130 00:17:04.063949 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkk9s" event={"ID":"c498e919-938a-4abb-927a-efb274f1c744","Type":"ContainerDied","Data":"41b2f5695859c5a66e16bd7b3f443a1cb9caa408376b6effb561ef5d29294a40"} Jan 30 00:17:05 crc kubenswrapper[5119]: I0130 00:17:05.072864 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkk9s" event={"ID":"c498e919-938a-4abb-927a-efb274f1c744","Type":"ContainerStarted","Data":"13251082f93a93c070647d6905499b559ae905ef3f58e1950ba53eb652ef97ea"} Jan 30 00:17:05 crc kubenswrapper[5119]: I0130 00:17:05.075136 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlk9w" 
event={"ID":"67405c86-4048-4098-a3e0-12fec1e771df","Type":"ContainerStarted","Data":"a7fa82f70ab3f28c4a78a52e31eacd4a4608e4e0210feea173fa1e10ff8a7061"} Jan 30 00:17:05 crc kubenswrapper[5119]: I0130 00:17:05.122146 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wkk9s" podStartSLOduration=4.448934567 podStartE2EDuration="5.122126654s" podCreationTimestamp="2026-01-30 00:17:00 +0000 UTC" firstStartedPulling="2026-01-30 00:17:02.036887522 +0000 UTC m=+426.050949981" lastFinishedPulling="2026-01-30 00:17:02.710079619 +0000 UTC m=+426.724142068" observedRunningTime="2026-01-30 00:17:05.093006752 +0000 UTC m=+429.107069221" watchObservedRunningTime="2026-01-30 00:17:05.122126654 +0000 UTC m=+429.136189113" Jan 30 00:17:05 crc kubenswrapper[5119]: I0130 00:17:05.123689 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hlk9w" podStartSLOduration=4.420680027 podStartE2EDuration="5.123684072s" podCreationTimestamp="2026-01-30 00:17:00 +0000 UTC" firstStartedPulling="2026-01-30 00:17:02.03964142 +0000 UTC m=+426.053703879" lastFinishedPulling="2026-01-30 00:17:02.742645465 +0000 UTC m=+426.756707924" observedRunningTime="2026-01-30 00:17:05.12194345 +0000 UTC m=+429.136005909" watchObservedRunningTime="2026-01-30 00:17:05.123684072 +0000 UTC m=+429.137746531" Jan 30 00:17:08 crc kubenswrapper[5119]: I0130 00:17:08.137757 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-85cj4" Jan 30 00:17:08 crc kubenswrapper[5119]: I0130 00:17:08.138254 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-85cj4" Jan 30 00:17:08 crc kubenswrapper[5119]: I0130 00:17:08.191310 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-85cj4" Jan 30 00:17:08 crc kubenswrapper[5119]: I0130 00:17:08.728891 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gdgwt" Jan 30 00:17:08 crc kubenswrapper[5119]: I0130 00:17:08.728942 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-marketplace-gdgwt" Jan 30 00:17:08 crc kubenswrapper[5119]: I0130 00:17:08.770323 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gdgwt" Jan 30 00:17:09 crc kubenswrapper[5119]: I0130 00:17:09.129267 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gdgwt" Jan 30 00:17:09 crc kubenswrapper[5119]: I0130 00:17:09.129529 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-85cj4" Jan 30 00:17:10 crc kubenswrapper[5119]: I0130 00:17:10.538962 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-hlk9w" Jan 30 00:17:10 crc kubenswrapper[5119]: I0130 00:17:10.539243 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hlk9w" Jan 30 00:17:10 crc kubenswrapper[5119]: I0130 00:17:10.583791 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hlk9w" Jan 30 00:17:11 crc 
kubenswrapper[5119]: I0130 00:17:11.171279 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hlk9w" Jan 30 00:17:11 crc kubenswrapper[5119]: I0130 00:17:11.186453 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:11 crc kubenswrapper[5119]: I0130 00:17:11.186507 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:11 crc kubenswrapper[5119]: I0130 00:17:11.243893 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:12 crc kubenswrapper[5119]: I0130 00:17:12.154249 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wkk9s" Jan 30 00:17:14 crc kubenswrapper[5119]: I0130 00:17:14.316552 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk"] Jan 30 00:17:14 crc kubenswrapper[5119]: I0130 00:17:14.317072 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" podUID="56f52cd3-d550-4724-8c03-bc908a45648a" containerName="controller-manager" containerID="cri-o://a4c1afdab4dd88f65a3139334d4d5388dac9f5ade00984b14015178e68190eff" gracePeriod=30 Jan 30 00:17:14 crc kubenswrapper[5119]: I0130 00:17:14.348122 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb"] Jan 30 00:17:14 crc kubenswrapper[5119]: I0130 00:17:14.348487 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" podUID="02044f8a-bf33-4525-b335-6ae4d67ce350" containerName="route-controller-manager" containerID="cri-o://dbe42818d03fc245dda0002ff1b46aae0a2a3f1ba001f6a942a7d3d44e20c9e3" gracePeriod=30 Jan 30 00:17:16 crc kubenswrapper[5119]: I0130 00:17:16.884541 5119 patch_prober.go:28] interesting pod/controller-manager-7d4657cfcf-zzkxk container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.72:8443/healthz\": dial tcp 10.217.0.72:8443: connect: connection refused" start-of-body= Jan 30 00:17:16 crc kubenswrapper[5119]: I0130 00:17:16.884921 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" podUID="56f52cd3-d550-4724-8c03-bc908a45648a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.72:8443/healthz\": dial tcp 10.217.0.72:8443: connect: connection refused" Jan 30 00:17:16 crc kubenswrapper[5119]: I0130 00:17:16.889286 5119 patch_prober.go:28] interesting pod/route-controller-manager-78b7754959-kd4pb container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.71:8443/healthz\": dial tcp 10.217.0.71:8443: connect: connection refused" start-of-body= Jan 30 00:17:16 crc kubenswrapper[5119]: I0130 00:17:16.889334 5119 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" podUID="02044f8a-bf33-4525-b335-6ae4d67ce350" 
containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.71:8443/healthz\": dial tcp 10.217.0.71:8443: connect: connection refused" Jan 30 00:17:17 crc kubenswrapper[5119]: I0130 00:17:17.139565 5119 generic.go:358] "Generic (PLEG): container finished" podID="56f52cd3-d550-4724-8c03-bc908a45648a" containerID="a4c1afdab4dd88f65a3139334d4d5388dac9f5ade00984b14015178e68190eff" exitCode=0 Jan 30 00:17:17 crc kubenswrapper[5119]: I0130 00:17:17.139667 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" event={"ID":"56f52cd3-d550-4724-8c03-bc908a45648a","Type":"ContainerDied","Data":"a4c1afdab4dd88f65a3139334d4d5388dac9f5ade00984b14015178e68190eff"} Jan 30 00:17:17 crc kubenswrapper[5119]: I0130 00:17:17.141250 5119 generic.go:358] "Generic (PLEG): container finished" podID="02044f8a-bf33-4525-b335-6ae4d67ce350" containerID="dbe42818d03fc245dda0002ff1b46aae0a2a3f1ba001f6a942a7d3d44e20c9e3" exitCode=0 Jan 30 00:17:17 crc kubenswrapper[5119]: I0130 00:17:17.141281 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" event={"ID":"02044f8a-bf33-4525-b335-6ae4d67ce350","Type":"ContainerDied","Data":"dbe42818d03fc245dda0002ff1b46aae0a2a3f1ba001f6a942a7d3d44e20c9e3"} Jan 30 00:17:17 crc kubenswrapper[5119]: I0130 00:17:17.999508 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.032062 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb"] Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.032664 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="02044f8a-bf33-4525-b335-6ae4d67ce350" containerName="route-controller-manager" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.032683 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="02044f8a-bf33-4525-b335-6ae4d67ce350" containerName="route-controller-manager" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.032767 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="02044f8a-bf33-4525-b335-6ae4d67ce350" containerName="route-controller-manager" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.038309 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.086024 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb"] Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.113717 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gjcs\" (UniqueName: \"kubernetes.io/projected/02044f8a-bf33-4525-b335-6ae4d67ce350-kube-api-access-9gjcs\") pod \"02044f8a-bf33-4525-b335-6ae4d67ce350\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.113813 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-client-ca\") pod \"02044f8a-bf33-4525-b335-6ae4d67ce350\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.113834 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/02044f8a-bf33-4525-b335-6ae4d67ce350-tmp\") pod \"02044f8a-bf33-4525-b335-6ae4d67ce350\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.113881 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-config\") pod \"02044f8a-bf33-4525-b335-6ae4d67ce350\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.113933 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02044f8a-bf33-4525-b335-6ae4d67ce350-serving-cert\") pod \"02044f8a-bf33-4525-b335-6ae4d67ce350\" (UID: \"02044f8a-bf33-4525-b335-6ae4d67ce350\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.114088 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8pbl\" (UniqueName: \"kubernetes.io/projected/890d17db-54c8-4812-ab84-84b4d36cf3ca-kube-api-access-v8pbl\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.114127 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/890d17db-54c8-4812-ab84-84b4d36cf3ca-client-ca\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.114142 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/890d17db-54c8-4812-ab84-84b4d36cf3ca-tmp\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.114166 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/890d17db-54c8-4812-ab84-84b4d36cf3ca-serving-cert\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.114229 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/890d17db-54c8-4812-ab84-84b4d36cf3ca-config\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.115087 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02044f8a-bf33-4525-b335-6ae4d67ce350-tmp" (OuterVolumeSpecName: "tmp") pod "02044f8a-bf33-4525-b335-6ae4d67ce350" (UID: "02044f8a-bf33-4525-b335-6ae4d67ce350"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.115152 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-config" (OuterVolumeSpecName: "config") pod "02044f8a-bf33-4525-b335-6ae4d67ce350" (UID: "02044f8a-bf33-4525-b335-6ae4d67ce350"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.116132 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-client-ca" (OuterVolumeSpecName: "client-ca") pod "02044f8a-bf33-4525-b335-6ae4d67ce350" (UID: "02044f8a-bf33-4525-b335-6ae4d67ce350"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.120209 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02044f8a-bf33-4525-b335-6ae4d67ce350-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "02044f8a-bf33-4525-b335-6ae4d67ce350" (UID: "02044f8a-bf33-4525-b335-6ae4d67ce350"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.120445 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02044f8a-bf33-4525-b335-6ae4d67ce350-kube-api-access-9gjcs" (OuterVolumeSpecName: "kube-api-access-9gjcs") pod "02044f8a-bf33-4525-b335-6ae4d67ce350" (UID: "02044f8a-bf33-4525-b335-6ae4d67ce350"). InnerVolumeSpecName "kube-api-access-9gjcs". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.147686 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" event={"ID":"56f52cd3-d550-4724-8c03-bc908a45648a","Type":"ContainerDied","Data":"955627af2428fd183ef9e4887ea785c74228bcfadde8af32eecad95e8b42a2a1"} Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.147724 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="955627af2428fd183ef9e4887ea785c74228bcfadde8af32eecad95e8b42a2a1" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.149142 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" event={"ID":"02044f8a-bf33-4525-b335-6ae4d67ce350","Type":"ContainerDied","Data":"a44a3fc7f7e7669d35284436ccad84e29d4e777432dd4248755ad252e60ce520"} Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.149190 5119 scope.go:117] "RemoveContainer" containerID="dbe42818d03fc245dda0002ff1b46aae0a2a3f1ba001f6a942a7d3d44e20c9e3" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.149224 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.151296 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.188829 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8465f9d74d-4ml6h"] Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.190121 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="56f52cd3-d550-4724-8c03-bc908a45648a" containerName="controller-manager" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.190143 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="56f52cd3-d550-4724-8c03-bc908a45648a" containerName="controller-manager" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.190258 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="56f52cd3-d550-4724-8c03-bc908a45648a" containerName="controller-manager" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.202827 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb"] Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.202870 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78b7754959-kd4pb"] Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.202893 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8465f9d74d-4ml6h"] Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.203003 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215330 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-config\") pod \"56f52cd3-d550-4724-8c03-bc908a45648a\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215421 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-proxy-ca-bundles\") pod \"56f52cd3-d550-4724-8c03-bc908a45648a\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215443 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq4x7\" (UniqueName: \"kubernetes.io/projected/56f52cd3-d550-4724-8c03-bc908a45648a-kube-api-access-vq4x7\") pod \"56f52cd3-d550-4724-8c03-bc908a45648a\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215465 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56f52cd3-d550-4724-8c03-bc908a45648a-serving-cert\") pod \"56f52cd3-d550-4724-8c03-bc908a45648a\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215486 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/56f52cd3-d550-4724-8c03-bc908a45648a-tmp\") pod \"56f52cd3-d550-4724-8c03-bc908a45648a\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215542 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-client-ca\") pod \"56f52cd3-d550-4724-8c03-bc908a45648a\" (UID: \"56f52cd3-d550-4724-8c03-bc908a45648a\") " Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215707 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/890d17db-54c8-4812-ab84-84b4d36cf3ca-config\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215750 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-v8pbl\" (UniqueName: \"kubernetes.io/projected/890d17db-54c8-4812-ab84-84b4d36cf3ca-kube-api-access-v8pbl\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215797 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/890d17db-54c8-4812-ab84-84b4d36cf3ca-client-ca\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215819 
5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/890d17db-54c8-4812-ab84-84b4d36cf3ca-tmp\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215848 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/890d17db-54c8-4812-ab84-84b4d36cf3ca-serving-cert\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215902 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215912 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/02044f8a-bf33-4525-b335-6ae4d67ce350-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215923 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02044f8a-bf33-4525-b335-6ae4d67ce350-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215932 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02044f8a-bf33-4525-b335-6ae4d67ce350-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.215941 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9gjcs\" (UniqueName: \"kubernetes.io/projected/02044f8a-bf33-4525-b335-6ae4d67ce350-kube-api-access-9gjcs\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.217824 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-config" (OuterVolumeSpecName: "config") pod "56f52cd3-d550-4724-8c03-bc908a45648a" (UID: "56f52cd3-d550-4724-8c03-bc908a45648a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.218165 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "56f52cd3-d550-4724-8c03-bc908a45648a" (UID: "56f52cd3-d550-4724-8c03-bc908a45648a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.218467 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-client-ca" (OuterVolumeSpecName: "client-ca") pod "56f52cd3-d550-4724-8c03-bc908a45648a" (UID: "56f52cd3-d550-4724-8c03-bc908a45648a"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.218792 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56f52cd3-d550-4724-8c03-bc908a45648a-tmp" (OuterVolumeSpecName: "tmp") pod "56f52cd3-d550-4724-8c03-bc908a45648a" (UID: "56f52cd3-d550-4724-8c03-bc908a45648a"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.219257 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/890d17db-54c8-4812-ab84-84b4d36cf3ca-config\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.220109 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/890d17db-54c8-4812-ab84-84b4d36cf3ca-serving-cert\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.220230 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/890d17db-54c8-4812-ab84-84b4d36cf3ca-client-ca\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.220364 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/890d17db-54c8-4812-ab84-84b4d36cf3ca-tmp\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.221974 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56f52cd3-d550-4724-8c03-bc908a45648a-kube-api-access-vq4x7" (OuterVolumeSpecName: "kube-api-access-vq4x7") pod "56f52cd3-d550-4724-8c03-bc908a45648a" (UID: "56f52cd3-d550-4724-8c03-bc908a45648a"). InnerVolumeSpecName "kube-api-access-vq4x7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.237565 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8pbl\" (UniqueName: \"kubernetes.io/projected/890d17db-54c8-4812-ab84-84b4d36cf3ca-kube-api-access-v8pbl\") pod \"route-controller-manager-7b9468fbfb-l9ktb\" (UID: \"890d17db-54c8-4812-ab84-84b4d36cf3ca\") " pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.238727 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56f52cd3-d550-4724-8c03-bc908a45648a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "56f52cd3-d550-4724-8c03-bc908a45648a" (UID: "56f52cd3-d550-4724-8c03-bc908a45648a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317283 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-client-ca\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317358 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nrpl\" (UniqueName: \"kubernetes.io/projected/bafa1248-ef6c-4f86-a632-4b78d38c1b26-kube-api-access-2nrpl\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317407 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-config\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317468 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bafa1248-ef6c-4f86-a632-4b78d38c1b26-serving-cert\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317614 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/bafa1248-ef6c-4f86-a632-4b78d38c1b26-tmp\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317738 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-proxy-ca-bundles\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317864 5119 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317890 5119 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317904 5119 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/56f52cd3-d550-4724-8c03-bc908a45648a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317917 5119 reconciler_common.go:299] "Volume detached 
for volume \"kube-api-access-vq4x7\" (UniqueName: \"kubernetes.io/projected/56f52cd3-d550-4724-8c03-bc908a45648a-kube-api-access-vq4x7\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317930 5119 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56f52cd3-d550-4724-8c03-bc908a45648a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.317941 5119 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/56f52cd3-d550-4724-8c03-bc908a45648a-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.375292 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.419069 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2nrpl\" (UniqueName: \"kubernetes.io/projected/bafa1248-ef6c-4f86-a632-4b78d38c1b26-kube-api-access-2nrpl\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.419127 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-config\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.419165 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bafa1248-ef6c-4f86-a632-4b78d38c1b26-serving-cert\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.419204 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/bafa1248-ef6c-4f86-a632-4b78d38c1b26-tmp\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.419249 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-proxy-ca-bundles\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.419313 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-client-ca\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.420048 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: 
\"kubernetes.io/empty-dir/bafa1248-ef6c-4f86-a632-4b78d38c1b26-tmp\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.420421 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-client-ca\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.420607 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-config\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.420969 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bafa1248-ef6c-4f86-a632-4b78d38c1b26-proxy-ca-bundles\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.422898 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bafa1248-ef6c-4f86-a632-4b78d38c1b26-serving-cert\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.450004 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nrpl\" (UniqueName: \"kubernetes.io/projected/bafa1248-ef6c-4f86-a632-4b78d38c1b26-kube-api-access-2nrpl\") pod \"controller-manager-8465f9d74d-4ml6h\" (UID: \"bafa1248-ef6c-4f86-a632-4b78d38c1b26\") " pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.518586 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.765125 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02044f8a-bf33-4525-b335-6ae4d67ce350" path="/var/lib/kubelet/pods/02044f8a-bf33-4525-b335-6ae4d67ce350/volumes" Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.766506 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb"] Jan 30 00:17:18 crc kubenswrapper[5119]: W0130 00:17:18.774794 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod890d17db_54c8_4812_ab84_84b4d36cf3ca.slice/crio-14fb5c386c9438ee26cc35a01a79b008789f6f8d917c1c89d2d25a5f47a01044 WatchSource:0}: Error finding container 14fb5c386c9438ee26cc35a01a79b008789f6f8d917c1c89d2d25a5f47a01044: Status 404 returned error can't find the container with id 14fb5c386c9438ee26cc35a01a79b008789f6f8d917c1c89d2d25a5f47a01044 Jan 30 00:17:18 crc kubenswrapper[5119]: I0130 00:17:18.886941 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8465f9d74d-4ml6h"] Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.157546 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" event={"ID":"bafa1248-ef6c-4f86-a632-4b78d38c1b26","Type":"ContainerStarted","Data":"37ce37c7be1fc1422d1a36a0465cf659a3062b518fe52118198c4d4f5ac4bc90"} Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.157602 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" event={"ID":"bafa1248-ef6c-4f86-a632-4b78d38c1b26","Type":"ContainerStarted","Data":"1002f7def05d02fa507c0c97bed7736307d45c06fbdccc45d3a89aa4106c872e"} Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.160268 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" event={"ID":"890d17db-54c8-4812-ab84-84b4d36cf3ca","Type":"ContainerStarted","Data":"0b8d784783643be7756188d36ac066ec5be6a09c0a0de6503d70f688857bf8c3"} Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.160331 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" event={"ID":"890d17db-54c8-4812-ab84-84b4d36cf3ca","Type":"ContainerStarted","Data":"14fb5c386c9438ee26cc35a01a79b008789f6f8d917c1c89d2d25a5f47a01044"} Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.160331 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk" Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.160575 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.175858 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" podStartSLOduration=5.175836797 podStartE2EDuration="5.175836797s" podCreationTimestamp="2026-01-30 00:17:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:17:19.171788588 +0000 UTC m=+443.185851057" watchObservedRunningTime="2026-01-30 00:17:19.175836797 +0000 UTC m=+443.189899276" Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.190943 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" podStartSLOduration=5.190921856 podStartE2EDuration="5.190921856s" podCreationTimestamp="2026-01-30 00:17:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:17:19.186000626 +0000 UTC m=+443.200063125" watchObservedRunningTime="2026-01-30 00:17:19.190921856 +0000 UTC m=+443.204984355" Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.206895 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk"] Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.212294 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7d4657cfcf-zzkxk"] Jan 30 00:17:19 crc kubenswrapper[5119]: I0130 00:17:19.560752 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7b9468fbfb-l9ktb" Jan 30 00:17:20 crc kubenswrapper[5119]: I0130 00:17:20.166385 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:20 crc kubenswrapper[5119]: I0130 00:17:20.172027 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8465f9d74d-4ml6h" Jan 30 00:17:20 crc kubenswrapper[5119]: I0130 00:17:20.762954 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56f52cd3-d550-4724-8c03-bc908a45648a" path="/var/lib/kubelet/pods/56f52cd3-d550-4724-8c03-bc908a45648a/volumes" Jan 30 00:17:23 crc kubenswrapper[5119]: I0130 00:17:23.060148 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-5d9d95bf5b-v9xt8" Jan 30 00:17:23 crc kubenswrapper[5119]: I0130 00:17:23.112405 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-c5xkv"] Jan 30 00:17:24 crc kubenswrapper[5119]: I0130 00:17:24.370965 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:17:24 crc 
kubenswrapper[5119]: I0130 00:17:24.371277 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:17:24 crc kubenswrapper[5119]: I0130 00:17:24.371316 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:17:24 crc kubenswrapper[5119]: I0130 00:17:24.371872 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2293b955c1384c8479b87ad35b70303afcaed7f3f538d92a41b347eff7768adf"} pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:17:24 crc kubenswrapper[5119]: I0130 00:17:24.371929 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" containerID="cri-o://2293b955c1384c8479b87ad35b70303afcaed7f3f538d92a41b347eff7768adf" gracePeriod=600 Jan 30 00:17:25 crc kubenswrapper[5119]: I0130 00:17:25.196612 5119 generic.go:358] "Generic (PLEG): container finished" podID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerID="2293b955c1384c8479b87ad35b70303afcaed7f3f538d92a41b347eff7768adf" exitCode=0 Jan 30 00:17:25 crc kubenswrapper[5119]: I0130 00:17:25.196689 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerDied","Data":"2293b955c1384c8479b87ad35b70303afcaed7f3f538d92a41b347eff7768adf"} Jan 30 00:17:25 crc kubenswrapper[5119]: I0130 00:17:25.196733 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"acb18e93ac4afc8e87c29f0f393415c46b320cde48c74a568ec3f5fc1b3d28d2"} Jan 30 00:17:25 crc kubenswrapper[5119]: I0130 00:17:25.196752 5119 scope.go:117] "RemoveContainer" containerID="4ec9f325eee2102e27ce2c2c8fd3570bc6b933200f4125272f5d5dc6a4741502" Jan 30 00:17:40 crc kubenswrapper[5119]: I0130 00:17:40.960520 5119 ???:1] "http: TLS handshake error from 192.168.126.11:53456: no serving certificate available for the kubelet" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.153247 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" podUID="5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" containerName="registry" containerID="cri-o://d32a8a187a68cc7cb3989c18cff3ebdd551562df85fb9ce5ba423fdfc0b302b3" gracePeriod=30 Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.342118 5119 generic.go:358] "Generic (PLEG): container finished" podID="5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" containerID="d32a8a187a68cc7cb3989c18cff3ebdd551562df85fb9ce5ba423fdfc0b302b3" exitCode=0 Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.342443 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" 
event={"ID":"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6","Type":"ContainerDied","Data":"d32a8a187a68cc7cb3989c18cff3ebdd551562df85fb9ce5ba423fdfc0b302b3"} Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.646753 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751622 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-ca-trust-extracted\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751665 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-trusted-ca\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751713 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-tls\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751864 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751907 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-installation-pull-secrets\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751931 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-certificates\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.751971 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-bound-sa-token\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.752023 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pv8f7\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-kube-api-access-pv8f7\") pod \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\" (UID: \"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6\") " Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.753128 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod 
"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.753528 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.761172 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.761295 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.761891 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-kube-api-access-pv8f7" (OuterVolumeSpecName: "kube-api-access-pv8f7") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "kube-api-access-pv8f7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.764371 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.764856 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (OuterVolumeSpecName: "registry-storage") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2". PluginName "kubernetes.io/csi", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.773022 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" (UID: "5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853001 5119 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853043 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pv8f7\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-kube-api-access-pv8f7\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853055 5119 reconciler_common.go:299] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853066 5119 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853075 5119 reconciler_common.go:299] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853084 5119 reconciler_common.go:299] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:48 crc kubenswrapper[5119]: I0130 00:17:48.853092 5119 reconciler_common.go:299] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 30 00:17:49 crc kubenswrapper[5119]: I0130 00:17:49.374516 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" event={"ID":"5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6","Type":"ContainerDied","Data":"bd6bf265a0de281cc350dda6da518af22a0777e0c249a0e11846caf62c4dc90f"} Jan 30 00:17:49 crc kubenswrapper[5119]: I0130 00:17:49.374622 5119 scope.go:117] "RemoveContainer" containerID="d32a8a187a68cc7cb3989c18cff3ebdd551562df85fb9ce5ba423fdfc0b302b3" Jan 30 00:17:49 crc kubenswrapper[5119]: I0130 00:17:49.374762 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-c5xkv" Jan 30 00:17:49 crc kubenswrapper[5119]: I0130 00:17:49.425676 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-c5xkv"] Jan 30 00:17:49 crc kubenswrapper[5119]: I0130 00:17:49.436161 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-c5xkv"] Jan 30 00:17:50 crc kubenswrapper[5119]: I0130 00:17:50.756227 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" path="/var/lib/kubelet/pods/5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6/volumes" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.144766 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495538-4rznx"] Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.147824 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" containerName="registry" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.147956 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" containerName="registry" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.148175 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="5a0a210e-8e3a-452a-b20c-a2e2f11bf3a6" containerName="registry" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.162764 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-4rznx"] Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.162902 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.166907 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.167232 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\"" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.167581 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.248171 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz5qp\" (UniqueName: \"kubernetes.io/projected/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1-kube-api-access-rz5qp\") pod \"auto-csr-approver-29495538-4rznx\" (UID: \"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1\") " pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.349509 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-rz5qp\" (UniqueName: \"kubernetes.io/projected/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1-kube-api-access-rz5qp\") pod \"auto-csr-approver-29495538-4rznx\" (UID: \"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1\") " pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.377383 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz5qp\" (UniqueName: \"kubernetes.io/projected/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1-kube-api-access-rz5qp\") pod 
\"auto-csr-approver-29495538-4rznx\" (UID: \"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1\") " pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.485858 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:00 crc kubenswrapper[5119]: I0130 00:18:00.895924 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-4rznx"] Jan 30 00:18:00 crc kubenswrapper[5119]: W0130 00:18:00.900611 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddca26ac5_6d5c_4e6f_86b4_bc4548d28bf1.slice/crio-e96dba7a5e390e6685073e03d7fe0eb66a7e38c592254cb244101e59b8de76f8 WatchSource:0}: Error finding container e96dba7a5e390e6685073e03d7fe0eb66a7e38c592254cb244101e59b8de76f8: Status 404 returned error can't find the container with id e96dba7a5e390e6685073e03d7fe0eb66a7e38c592254cb244101e59b8de76f8 Jan 30 00:18:01 crc kubenswrapper[5119]: I0130 00:18:01.446033 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495538-4rznx" event={"ID":"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1","Type":"ContainerStarted","Data":"e96dba7a5e390e6685073e03d7fe0eb66a7e38c592254cb244101e59b8de76f8"} Jan 30 00:18:04 crc kubenswrapper[5119]: I0130 00:18:04.414188 5119 csr.go:274] "Certificate signing request is approved, waiting to be issued" logger="kubernetes.io/kubelet-serving" csr="csr-nwjgx" Jan 30 00:18:04 crc kubenswrapper[5119]: I0130 00:18:04.438926 5119 csr.go:270] "Certificate signing request is issued" logger="kubernetes.io/kubelet-serving" csr="csr-nwjgx" Jan 30 00:18:04 crc kubenswrapper[5119]: I0130 00:18:04.463745 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495538-4rznx" event={"ID":"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1","Type":"ContainerStarted","Data":"c437b0536f5363caf54d0562026c6425415aeb3e3104961212e0ff2b30744c71"} Jan 30 00:18:04 crc kubenswrapper[5119]: I0130 00:18:04.474834 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29495538-4rznx" podStartSLOduration=1.55137725 podStartE2EDuration="4.474815786s" podCreationTimestamp="2026-01-30 00:18:00 +0000 UTC" firstStartedPulling="2026-01-30 00:18:00.902728254 +0000 UTC m=+484.916790713" lastFinishedPulling="2026-01-30 00:18:03.826166789 +0000 UTC m=+487.840229249" observedRunningTime="2026-01-30 00:18:04.472787347 +0000 UTC m=+488.486849806" watchObservedRunningTime="2026-01-30 00:18:04.474815786 +0000 UTC m=+488.488878245" Jan 30 00:18:05 crc kubenswrapper[5119]: I0130 00:18:05.439884 5119 certificate_manager.go:715] "Certificate rotation deadline determined" logger="kubernetes.io/kubelet-serving" expiration="2026-03-01 00:13:04 +0000 UTC" deadline="2026-02-25 15:28:05.409627854 +0000 UTC" Jan 30 00:18:05 crc kubenswrapper[5119]: I0130 00:18:05.439950 5119 certificate_manager.go:431] "Waiting for next certificate rotation" logger="kubernetes.io/kubelet-serving" sleep="639h9m59.969685024s" Jan 30 00:18:05 crc kubenswrapper[5119]: I0130 00:18:05.473060 5119 generic.go:358] "Generic (PLEG): container finished" podID="dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1" containerID="c437b0536f5363caf54d0562026c6425415aeb3e3104961212e0ff2b30744c71" exitCode=0 Jan 30 00:18:05 crc kubenswrapper[5119]: I0130 00:18:05.473119 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" 
pod="openshift-infra/auto-csr-approver-29495538-4rznx" event={"ID":"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1","Type":"ContainerDied","Data":"c437b0536f5363caf54d0562026c6425415aeb3e3104961212e0ff2b30744c71"} Jan 30 00:18:06 crc kubenswrapper[5119]: I0130 00:18:06.440810 5119 certificate_manager.go:715] "Certificate rotation deadline determined" logger="kubernetes.io/kubelet-serving" expiration="2026-03-01 00:13:04 +0000 UTC" deadline="2026-02-20 00:13:55.572834643 +0000 UTC" Jan 30 00:18:06 crc kubenswrapper[5119]: I0130 00:18:06.440872 5119 certificate_manager.go:431] "Waiting for next certificate rotation" logger="kubernetes.io/kubelet-serving" sleep="503h55m49.131967916s" Jan 30 00:18:06 crc kubenswrapper[5119]: I0130 00:18:06.786498 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:06 crc kubenswrapper[5119]: I0130 00:18:06.842758 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz5qp\" (UniqueName: \"kubernetes.io/projected/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1-kube-api-access-rz5qp\") pod \"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1\" (UID: \"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1\") " Jan 30 00:18:06 crc kubenswrapper[5119]: I0130 00:18:06.849723 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1-kube-api-access-rz5qp" (OuterVolumeSpecName: "kube-api-access-rz5qp") pod "dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1" (UID: "dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1"). InnerVolumeSpecName "kube-api-access-rz5qp". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:18:06 crc kubenswrapper[5119]: I0130 00:18:06.944776 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-rz5qp\" (UniqueName: \"kubernetes.io/projected/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1-kube-api-access-rz5qp\") on node \"crc\" DevicePath \"\"" Jan 30 00:18:07 crc kubenswrapper[5119]: I0130 00:18:07.486973 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-4rznx" Jan 30 00:18:07 crc kubenswrapper[5119]: I0130 00:18:07.487009 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495538-4rznx" event={"ID":"dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1","Type":"ContainerDied","Data":"e96dba7a5e390e6685073e03d7fe0eb66a7e38c592254cb244101e59b8de76f8"} Jan 30 00:18:07 crc kubenswrapper[5119]: I0130 00:18:07.487037 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e96dba7a5e390e6685073e03d7fe0eb66a7e38c592254cb244101e59b8de76f8" Jan 30 00:19:24 crc kubenswrapper[5119]: I0130 00:19:24.371508 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:19:24 crc kubenswrapper[5119]: I0130 00:19:24.372244 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:19:54 crc kubenswrapper[5119]: I0130 00:19:54.371088 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:19:54 crc kubenswrapper[5119]: I0130 00:19:54.371851 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:19:56 crc kubenswrapper[5119]: I0130 00:19:56.990516 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log" Jan 30 00:19:56 crc kubenswrapper[5119]: I0130 00:19:56.994257 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.145425 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495540-ldthh"] Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.145948 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1" containerName="oc" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.145963 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1" containerName="oc" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.146067 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1" containerName="oc" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.153927 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.154675 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-ldthh"] Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.155995 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.156003 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\"" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.158018 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.237300 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79jkj\" (UniqueName: \"kubernetes.io/projected/67bc7b41-e975-46b0-acfe-1e4ad46c6679-kube-api-access-79jkj\") pod \"auto-csr-approver-29495540-ldthh\" (UID: \"67bc7b41-e975-46b0-acfe-1e4ad46c6679\") " pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.338578 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-79jkj\" (UniqueName: \"kubernetes.io/projected/67bc7b41-e975-46b0-acfe-1e4ad46c6679-kube-api-access-79jkj\") pod \"auto-csr-approver-29495540-ldthh\" (UID: \"67bc7b41-e975-46b0-acfe-1e4ad46c6679\") " pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.359917 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-79jkj\" (UniqueName: \"kubernetes.io/projected/67bc7b41-e975-46b0-acfe-1e4ad46c6679-kube-api-access-79jkj\") pod \"auto-csr-approver-29495540-ldthh\" (UID: \"67bc7b41-e975-46b0-acfe-1e4ad46c6679\") " pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.477216 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:00 crc kubenswrapper[5119]: I0130 00:20:00.679135 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-ldthh"] Jan 30 00:20:00 crc kubenswrapper[5119]: W0130 00:20:00.685714 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67bc7b41_e975_46b0_acfe_1e4ad46c6679.slice/crio-98906555ac1caa23a80f0e93df9f9d4a32e84d92a7a52b644bef73e2fb69ce02 WatchSource:0}: Error finding container 98906555ac1caa23a80f0e93df9f9d4a32e84d92a7a52b644bef73e2fb69ce02: Status 404 returned error can't find the container with id 98906555ac1caa23a80f0e93df9f9d4a32e84d92a7a52b644bef73e2fb69ce02 Jan 30 00:20:01 crc kubenswrapper[5119]: I0130 00:20:01.186221 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495540-ldthh" event={"ID":"67bc7b41-e975-46b0-acfe-1e4ad46c6679","Type":"ContainerStarted","Data":"98906555ac1caa23a80f0e93df9f9d4a32e84d92a7a52b644bef73e2fb69ce02"} Jan 30 00:20:02 crc kubenswrapper[5119]: I0130 00:20:02.192613 5119 generic.go:358] "Generic (PLEG): container finished" podID="67bc7b41-e975-46b0-acfe-1e4ad46c6679" containerID="8ebf90ce66a36b561e345c80cfcbf6257ac7f7696717432e8f4f1aafd604dab9" exitCode=0 Jan 30 00:20:02 crc kubenswrapper[5119]: I0130 00:20:02.192672 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495540-ldthh" event={"ID":"67bc7b41-e975-46b0-acfe-1e4ad46c6679","Type":"ContainerDied","Data":"8ebf90ce66a36b561e345c80cfcbf6257ac7f7696717432e8f4f1aafd604dab9"} Jan 30 00:20:03 crc kubenswrapper[5119]: I0130 00:20:03.401629 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:03 crc kubenswrapper[5119]: I0130 00:20:03.487442 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79jkj\" (UniqueName: \"kubernetes.io/projected/67bc7b41-e975-46b0-acfe-1e4ad46c6679-kube-api-access-79jkj\") pod \"67bc7b41-e975-46b0-acfe-1e4ad46c6679\" (UID: \"67bc7b41-e975-46b0-acfe-1e4ad46c6679\") " Jan 30 00:20:03 crc kubenswrapper[5119]: I0130 00:20:03.493540 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67bc7b41-e975-46b0-acfe-1e4ad46c6679-kube-api-access-79jkj" (OuterVolumeSpecName: "kube-api-access-79jkj") pod "67bc7b41-e975-46b0-acfe-1e4ad46c6679" (UID: "67bc7b41-e975-46b0-acfe-1e4ad46c6679"). InnerVolumeSpecName "kube-api-access-79jkj". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:20:03 crc kubenswrapper[5119]: I0130 00:20:03.588778 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-79jkj\" (UniqueName: \"kubernetes.io/projected/67bc7b41-e975-46b0-acfe-1e4ad46c6679-kube-api-access-79jkj\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:04 crc kubenswrapper[5119]: I0130 00:20:04.204535 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-ldthh" Jan 30 00:20:04 crc kubenswrapper[5119]: I0130 00:20:04.204531 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495540-ldthh" event={"ID":"67bc7b41-e975-46b0-acfe-1e4ad46c6679","Type":"ContainerDied","Data":"98906555ac1caa23a80f0e93df9f9d4a32e84d92a7a52b644bef73e2fb69ce02"} Jan 30 00:20:04 crc kubenswrapper[5119]: I0130 00:20:04.204764 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98906555ac1caa23a80f0e93df9f9d4a32e84d92a7a52b644bef73e2fb69ce02" Jan 30 00:20:24 crc kubenswrapper[5119]: I0130 00:20:24.371571 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:20:24 crc kubenswrapper[5119]: I0130 00:20:24.372252 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:20:24 crc kubenswrapper[5119]: I0130 00:20:24.372358 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:20:24 crc kubenswrapper[5119]: I0130 00:20:24.373494 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"acb18e93ac4afc8e87c29f0f393415c46b320cde48c74a568ec3f5fc1b3d28d2"} pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:20:24 crc kubenswrapper[5119]: I0130 00:20:24.373591 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" containerID="cri-o://acb18e93ac4afc8e87c29f0f393415c46b320cde48c74a568ec3f5fc1b3d28d2" gracePeriod=600 Jan 30 00:20:24 crc kubenswrapper[5119]: I0130 00:20:24.508327 5119 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:20:25 crc kubenswrapper[5119]: I0130 00:20:25.337688 5119 generic.go:358] "Generic (PLEG): container finished" podID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerID="acb18e93ac4afc8e87c29f0f393415c46b320cde48c74a568ec3f5fc1b3d28d2" exitCode=0 Jan 30 00:20:25 crc kubenswrapper[5119]: I0130 00:20:25.337737 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerDied","Data":"acb18e93ac4afc8e87c29f0f393415c46b320cde48c74a568ec3f5fc1b3d28d2"} Jan 30 00:20:25 crc kubenswrapper[5119]: I0130 00:20:25.338500 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"26b3207f3ca251d191e8246615fc9445ba3613afe23d646d0e32410e7bcd59ac"} Jan 30 00:20:25 crc kubenswrapper[5119]: I0130 
00:20:25.338549 5119 scope.go:117] "RemoveContainer" containerID="2293b955c1384c8479b87ad35b70303afcaed7f3f538d92a41b347eff7768adf" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.193654 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75"] Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.195236 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="kube-rbac-proxy" containerID="cri-o://778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.195462 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="ovnkube-cluster-manager" containerID="cri-o://8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.394080 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.404942 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nwvqg"] Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405616 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-controller" containerID="cri-o://eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405712 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="northd" containerID="cri-o://64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405829 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="sbdb" containerID="cri-o://3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405722 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405722 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-acl-logging" containerID="cri-o://a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405727 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" 
containerName="kube-rbac-proxy-node" containerID="cri-o://b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.405773 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="nbdb" containerID="cri-o://a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.445809 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp"] Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.446722 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="kube-rbac-proxy" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.446805 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="kube-rbac-proxy" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.446873 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="67bc7b41-e975-46b0-acfe-1e4ad46c6679" containerName="oc" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.446926 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="67bc7b41-e975-46b0-acfe-1e4ad46c6679" containerName="oc" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.447002 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="ovnkube-cluster-manager" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.447059 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="ovnkube-cluster-manager" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.447227 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="ovnkube-cluster-manager" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.447296 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="67bc7b41-e975-46b0-acfe-1e4ad46c6679" containerName="oc" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.447363 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerName="kube-rbac-proxy" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.451425 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.469917 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovnkube-controller" containerID="cri-o://70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb" gracePeriod=30 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.488484 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qznm\" (UniqueName: \"kubernetes.io/projected/976aa95a-addb-4ae4-9ec8-efd0863c66af-kube-api-access-4qznm\") pod \"976aa95a-addb-4ae4-9ec8-efd0863c66af\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.492296 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-env-overrides\") pod \"976aa95a-addb-4ae4-9ec8-efd0863c66af\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.492407 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovnkube-config\") pod \"976aa95a-addb-4ae4-9ec8-efd0863c66af\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.492512 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovn-control-plane-metrics-cert\") pod \"976aa95a-addb-4ae4-9ec8-efd0863c66af\" (UID: \"976aa95a-addb-4ae4-9ec8-efd0863c66af\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.493578 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "976aa95a-addb-4ae4-9ec8-efd0863c66af" (UID: "976aa95a-addb-4ae4-9ec8-efd0863c66af"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.493766 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "976aa95a-addb-4ae4-9ec8-efd0863c66af" (UID: "976aa95a-addb-4ae4-9ec8-efd0863c66af"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.498000 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/976aa95a-addb-4ae4-9ec8-efd0863c66af-kube-api-access-4qznm" (OuterVolumeSpecName: "kube-api-access-4qznm") pod "976aa95a-addb-4ae4-9ec8-efd0863c66af" (UID: "976aa95a-addb-4ae4-9ec8-efd0863c66af"). InnerVolumeSpecName "kube-api-access-4qznm". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.498582 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "976aa95a-addb-4ae4-9ec8-efd0863c66af" (UID: "976aa95a-addb-4ae4-9ec8-efd0863c66af"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.594515 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79052a33-2155-4ebf-b96c-2f27a1d76ab6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.594572 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79052a33-2155-4ebf-b96c-2f27a1d76ab6-ovnkube-config\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.594602 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pmw8\" (UniqueName: \"kubernetes.io/projected/79052a33-2155-4ebf-b96c-2f27a1d76ab6-kube-api-access-4pmw8\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.594710 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79052a33-2155-4ebf-b96c-2f27a1d76ab6-env-overrides\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.595687 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4qznm\" (UniqueName: \"kubernetes.io/projected/976aa95a-addb-4ae4-9ec8-efd0863c66af-kube-api-access-4qznm\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.595715 5119 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.595725 5119 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.595734 5119 reconciler_common.go:299] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/976aa95a-addb-4ae4-9ec8-efd0863c66af-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674584 5119 generic.go:358] "Generic 
(PLEG): container finished" podID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerID="8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd" exitCode=0 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674612 5119 generic.go:358] "Generic (PLEG): container finished" podID="976aa95a-addb-4ae4-9ec8-efd0863c66af" containerID="778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d" exitCode=0 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674641 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" event={"ID":"976aa95a-addb-4ae4-9ec8-efd0863c66af","Type":"ContainerDied","Data":"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674719 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" event={"ID":"976aa95a-addb-4ae4-9ec8-efd0863c66af","Type":"ContainerDied","Data":"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674734 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" event={"ID":"976aa95a-addb-4ae4-9ec8-efd0863c66af","Type":"ContainerDied","Data":"63a3aed84087fd31e4f22ce50456603c6699c61f29660009544c63ff9a536a81"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674755 5119 scope.go:117] "RemoveContainer" containerID="8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.674669 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.681554 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nwvqg_7337c888-01aa-4a6b-b494-7a51eff39634/ovn-acl-logging/0.log" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682004 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nwvqg_7337c888-01aa-4a6b-b494-7a51eff39634/ovn-controller/0.log" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682262 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f" exitCode=0 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682289 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7" exitCode=0 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682298 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68" exitCode=0 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682306 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d" exitCode=143 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682314 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" 
containerID="eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4" exitCode=143 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682362 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682385 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682433 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682445 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.682458 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.684182 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qxpww_0cf99dcb-47cd-4077-9fb1-e39bf209e431/kube-multus/0.log" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.684209 5119 generic.go:358] "Generic (PLEG): container finished" podID="0cf99dcb-47cd-4077-9fb1-e39bf209e431" containerID="0c105e9976984cb6d41e14b3c489d698232bb451fdc0dbc82cb48a1e367f105b" exitCode=2 Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.684278 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qxpww" event={"ID":"0cf99dcb-47cd-4077-9fb1-e39bf209e431","Type":"ContainerDied","Data":"0c105e9976984cb6d41e14b3c489d698232bb451fdc0dbc82cb48a1e367f105b"} Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.684857 5119 scope.go:117] "RemoveContainer" containerID="0c105e9976984cb6d41e14b3c489d698232bb451fdc0dbc82cb48a1e367f105b" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.696436 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79052a33-2155-4ebf-b96c-2f27a1d76ab6-ovnkube-config\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.696531 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-4pmw8\" (UniqueName: \"kubernetes.io/projected/79052a33-2155-4ebf-b96c-2f27a1d76ab6-kube-api-access-4pmw8\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.696615 5119 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79052a33-2155-4ebf-b96c-2f27a1d76ab6-env-overrides\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.697201 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79052a33-2155-4ebf-b96c-2f27a1d76ab6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.697290 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79052a33-2155-4ebf-b96c-2f27a1d76ab6-env-overrides\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.699185 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79052a33-2155-4ebf-b96c-2f27a1d76ab6-ovnkube-config\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.703038 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79052a33-2155-4ebf-b96c-2f27a1d76ab6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.711220 5119 scope.go:117] "RemoveContainer" containerID="778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.718690 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pmw8\" (UniqueName: \"kubernetes.io/projected/79052a33-2155-4ebf-b96c-2f27a1d76ab6-kube-api-access-4pmw8\") pod \"ovnkube-control-plane-97c9b6c48-6x5gp\" (UID: \"79052a33-2155-4ebf-b96c-2f27a1d76ab6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.719913 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75"] Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.722927 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-gsh75"] Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.727460 5119 scope.go:117] "RemoveContainer" containerID="8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd" Jan 30 00:21:16 crc kubenswrapper[5119]: E0130 00:21:16.727992 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd\": container with ID starting with 
8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd not found: ID does not exist" containerID="8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.728037 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd"} err="failed to get container status \"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd\": rpc error: code = NotFound desc = could not find container \"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd\": container with ID starting with 8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd not found: ID does not exist"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.728062 5119 scope.go:117] "RemoveContainer" containerID="778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"
Jan 30 00:21:16 crc kubenswrapper[5119]: E0130 00:21:16.728468 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d\": container with ID starting with 778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d not found: ID does not exist" containerID="778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.728513 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"} err="failed to get container status \"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d\": rpc error: code = NotFound desc = could not find container \"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d\": container with ID starting with 778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d not found: ID does not exist"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.728538 5119 scope.go:117] "RemoveContainer" containerID="8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.729063 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd"} err="failed to get container status \"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd\": rpc error: code = NotFound desc = could not find container \"8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd\": container with ID starting with 8a8768e0a41524baf86c003184145dfe45e8b6cc8b2b2bcea3b7413234cd38fd not found: ID does not exist"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.729091 5119 scope.go:117] "RemoveContainer" containerID="778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.729445 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d"} err="failed to get container status \"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d\": rpc error: code = NotFound desc = could not find container \"778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d\": container with ID starting with 778eea4be711478b6cf0ec6d7e64bf183d6f34560611de526b54bb1267590c9d not found: ID does not exist"
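
The paired "RemoveContainer" / "DeleteContainer returned error ... NotFound" entries above are the kubelet re-issuing deletions for containers cri-o has already pruned; during pod teardown these NotFound RPC errors are expected noise rather than failures. A minimal Go sketch for separating that benign noise from real deletion problems when scanning a log like this one (the regexes are assumptions fitted to the lines above, not a stable kubelet format):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Count RemoveContainer attempts and benign "already gone" deletion errors
// in a kubelet log read from stdin. Sketch under assumptions: the patterns
// mirror the kubenswrapper lines above and may need adjusting per version.
func main() {
	removes := regexp.MustCompile(`"RemoveContainer" containerID="([0-9a-f]{64})"`)
	notFound := regexp.MustCompile(`DeleteContainer returned error.*code = NotFound`)

	attempts, benign := 0, 0
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // individual records can be long
	for sc.Scan() {
		line := sc.Text()
		if removes.MatchString(line) {
			attempts++
		}
		if notFound.MatchString(line) {
			benign++
		}
	}
	fmt.Printf("RemoveContainer attempts: %d, NotFound (already gone): %d\n", attempts, benign)
}

Run it as, e.g., go run main.go < kubelet.log; any DeleteContainer error that is not NotFound would then stand out by inspection.
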
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.764457 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="976aa95a-addb-4ae4-9ec8-efd0863c66af" path="/var/lib/kubelet/pods/976aa95a-addb-4ae4-9ec8-efd0863c66af/volumes"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.771540 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nwvqg_7337c888-01aa-4a6b-b494-7a51eff39634/ovn-acl-logging/0.log"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.772250 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nwvqg_7337c888-01aa-4a6b-b494-7a51eff39634/ovn-controller/0.log"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.772708 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.792504 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.827461 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-k558t"]
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828304 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="sbdb"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828326 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="sbdb"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828338 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovnkube-controller"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828347 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovnkube-controller"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828374 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="nbdb"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828383 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="nbdb"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828464 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-controller"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828475 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-controller"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828496 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-node"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828505 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-node"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828539 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-acl-logging"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828547 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-acl-logging"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828562 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kubecfg-setup"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828570 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kubecfg-setup"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828585 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-ovn-metrics"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828613 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-ovn-metrics"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828636 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="northd"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828643 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="northd"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828789 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-acl-logging"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828805 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovn-controller"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828817 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-ovn-metrics"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828825 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="northd"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828855 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="ovnkube-controller"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828864 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="nbdb"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828872 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="sbdb"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.828880 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" containerName="kube-rbac-proxy-node"
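
The cpu_manager.go:401, state_mem.go:107, and memory_manager.go:356 entries above show the kubelet's resource managers discarding per-container CPU and memory assignments for the deleted ovnkube-node-nwvqg pod (UID 7337c888-01aa-4a6b-b494-7a51eff39634) before the replacement pod is admitted. Conceptually the state is a map keyed by pod UID and container name that gets pruned; a simplified, assumption-labeled sketch of that bookkeeping (not the real checkpoint format):

package main

import "fmt"

// key is a stand-in for how the resource managers track per-container
// assignments (assumption: (podUID, containerName) identifies an entry).
type key struct {
	podUID        string
	containerName string
}

// removeStaleState drops every assignment belonging to a pod that no longer
// exists, mirroring the "RemoveStaleState" entries above.
func removeStaleState(assignments map[key][]int, deadPodUID string) {
	for k := range assignments { // deleting during range is safe in Go
		if k.podUID == deadPodUID {
			fmt.Printf("removing container %q of pod %s\n", k.containerName, k.podUID)
			delete(assignments, k)
		}
	}
}

func main() {
	m := map[key][]int{
		{podUID: "7337c888-01aa-4a6b-b494-7a51eff39634", containerName: "sbdb"}:   {0, 1},
		{podUID: "7337c888-01aa-4a6b-b494-7a51eff39634", containerName: "nbdb"}:   {2},
		{podUID: "7f0802f1-a806-48cf-819e-27e152f93af2", containerName: "northd"}: {3},
	}
	removeStaleState(m, "7337c888-01aa-4a6b-b494-7a51eff39634")
	fmt.Println("remaining assignments:", len(m)) // only the new pod's entry survives
}
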
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.832958 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.898899 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-script-lib\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") "
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899079 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-log-socket\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") "
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899114 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-openvswitch\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") "
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899148 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-node-log\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") "
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899209 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-kubelet\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") "
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899210 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-log-socket" (OuterVolumeSpecName: "log-socket") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899258 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-node-log" (OuterVolumeSpecName: "node-log") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899289 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899258 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "run-openvswitch".
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899274 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4qqn\" (UniqueName: \"kubernetes.io/projected/7337c888-01aa-4a6b-b494-7a51eff39634-kube-api-access-r4qqn\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899403 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-slash\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899430 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-netd\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899525 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-systemd\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899568 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-config\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899580 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-bin\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899599 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-ovn-kubernetes\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899627 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-var-lib-openvswitch\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899656 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-var-lib-cni-networks-ovn-kubernetes\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899672 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-env-overrides\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899689 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-ovn\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899708 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-netns\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899720 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-systemd-units\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899742 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7337c888-01aa-4a6b-b494-7a51eff39634-ovn-node-metrics-cert\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899774 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-etc-openvswitch\") pod \"7337c888-01aa-4a6b-b494-7a51eff39634\" (UID: \"7337c888-01aa-4a6b-b494-7a51eff39634\") " Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899912 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899958 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899980 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-slash" (OuterVolumeSpecName: "host-slash") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.899998 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900151 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900161 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900185 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900195 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900203 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900222 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900220 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900451 5119 reconciler_common.go:299] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900469 5119 reconciler_common.go:299] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900482 5119 reconciler_common.go:299] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-log-socket\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900492 5119 reconciler_common.go:299] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900503 5119 reconciler_common.go:299] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-node-log\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900514 5119 reconciler_common.go:299] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900524 5119 reconciler_common.go:299] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-slash\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900534 5119 reconciler_common.go:299] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900544 5119 reconciler_common.go:299] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900555 5119 reconciler_common.go:299] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900567 5119 reconciler_common.go:299] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900577 5119 
reconciler_common.go:299] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900588 5119 reconciler_common.go:299] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900598 5119 reconciler_common.go:299] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-host-run-netns\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900613 5119 reconciler_common.go:299] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-systemd-units\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900612 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.900678 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.903929 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7337c888-01aa-4a6b-b494-7a51eff39634-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.904372 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7337c888-01aa-4a6b-b494-7a51eff39634-kube-api-access-r4qqn" (OuterVolumeSpecName: "kube-api-access-r4qqn") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "kube-api-access-r4qqn". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:21:16 crc kubenswrapper[5119]: I0130 00:21:16.911161 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "7337c888-01aa-4a6b-b494-7a51eff39634" (UID: "7337c888-01aa-4a6b-b494-7a51eff39634"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
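
Each volume of the old pod passes through three logged phases above: "UnmountVolume started" (reconciler_common.go:162), "UnmountVolume.TearDown succeeded" (operation_generator.go:781), and finally "Volume detached" (reconciler_common.go:299). A hedged Go sketch that replays such a log and flags volumes that never reached the detached phase (the patterns are assumptions fitted to the lines above, and keying by volume name alone assumes a single pod's teardown window):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Track each volume through the teardown phases logged above and report any
// volume that never detached. Sketch under assumptions, not a stable format.
func main() {
	started := regexp.MustCompile(`UnmountVolume started for volume \\"([^"\\]+)\\"`)
	detached := regexp.MustCompile(`Volume detached for volume \\"([^"\\]+)\\"`)

	phase := map[string]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		line := sc.Text()
		if m := started.FindStringSubmatch(line); m != nil {
			phase[m[1]] = "unmounting"
		}
		if m := detached.FindStringSubmatch(line); m != nil {
			phase[m[1]] = "detached"
		}
	}
	for vol, p := range phase {
		if p != "detached" {
			fmt.Printf("volume %q stuck in phase %s\n", vol, p)
		}
	}
}

In the window above every volume of pod 7337c888-01aa-4a6b-b494-7a51eff39634 reaches "Volume detached", so this sketch would print nothing.
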
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001807 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-ovn\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001855 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-systemd-units\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001887 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4mbz\" (UniqueName: \"kubernetes.io/projected/7f0802f1-a806-48cf-819e-27e152f93af2-kube-api-access-c4mbz\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001905 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-etc-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001925 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-slash\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001945 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-systemd\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.001964 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-ovnkube-script-lib\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002009 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7f0802f1-a806-48cf-819e-27e152f93af2-ovn-node-metrics-cert\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002035 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002058 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-env-overrides\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002073 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-var-lib-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002107 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-run-ovn-kubernetes\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002125 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-run-netns\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002143 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002164 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-cni-netd\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002185 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-kubelet\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002202 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-ovnkube-config\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 
00:21:17.002220 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-node-log\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002237 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-cni-bin\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002258 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-log-socket\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002288 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-r4qqn\" (UniqueName: \"kubernetes.io/projected/7337c888-01aa-4a6b-b494-7a51eff39634-kube-api-access-r4qqn\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002298 5119 reconciler_common.go:299] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7337c888-01aa-4a6b-b494-7a51eff39634-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002309 5119 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002317 5119 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7337c888-01aa-4a6b-b494-7a51eff39634-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.002326 5119 reconciler_common.go:299] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7337c888-01aa-4a6b-b494-7a51eff39634-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103491 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-c4mbz\" (UniqueName: \"kubernetes.io/projected/7f0802f1-a806-48cf-819e-27e152f93af2-kube-api-access-c4mbz\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103726 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-etc-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103764 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-slash\") pod \"ovnkube-node-k558t\" 
(UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103806 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-systemd\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103832 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-ovnkube-script-lib\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103867 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7f0802f1-a806-48cf-819e-27e152f93af2-ovn-node-metrics-cert\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103897 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103890 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-etc-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103928 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-systemd\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.103999 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104022 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-slash\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104081 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-env-overrides\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc 
kubenswrapper[5119]: I0130 00:21:17.104191 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-var-lib-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104287 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-var-lib-openvswitch\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104300 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-run-ovn-kubernetes\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104335 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-run-ovn-kubernetes\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104349 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-run-netns\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104415 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104460 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-cni-netd\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104499 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-run-netns\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104538 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-kubelet\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: 
I0130 00:21:17.104566 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-cni-netd\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104542 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104576 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-ovnkube-config\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104638 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-kubelet\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104752 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-env-overrides\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104794 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-node-log\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104817 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-cni-bin\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104847 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-log-socket\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104852 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-node-log\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104865 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-ovn\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104883 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-host-cni-bin\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104885 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-systemd-units\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104903 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-systemd-units\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104926 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-run-ovn\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.104947 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7f0802f1-a806-48cf-819e-27e152f93af2-log-socket\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.105506 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-ovnkube-config\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.106248 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7f0802f1-a806-48cf-819e-27e152f93af2-ovnkube-script-lib\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.107071 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7f0802f1-a806-48cf-819e-27e152f93af2-ovn-node-metrics-cert\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.125635 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4mbz\" (UniqueName: \"kubernetes.io/projected/7f0802f1-a806-48cf-819e-27e152f93af2-kube-api-access-c4mbz\") pod \"ovnkube-node-k558t\" (UID: \"7f0802f1-a806-48cf-819e-27e152f93af2\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.152813 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.693548 5119 generic.go:358] "Generic (PLEG): container finished" podID="7f0802f1-a806-48cf-819e-27e152f93af2" containerID="cf6a05d84fb8be5adbe9145de92ea328c0adce719b15ac54729c4a390b73cdcd" exitCode=0 Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.693613 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerDied","Data":"cf6a05d84fb8be5adbe9145de92ea328c0adce719b15ac54729c4a390b73cdcd"} Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.693657 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"0c792cbf960ba5e5f5a99ef6c2f9dcb2405880c4581294bb5153ca39628e3771"} Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.699746 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nwvqg_7337c888-01aa-4a6b-b494-7a51eff39634/ovn-acl-logging/0.log" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.701178 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nwvqg_7337c888-01aa-4a6b-b494-7a51eff39634/ovn-controller/0.log" Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.701781 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb" exitCode=0 Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.701837 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d" exitCode=0 Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.701854 5119 generic.go:358] "Generic (PLEG): container finished" podID="7337c888-01aa-4a6b-b494-7a51eff39634" containerID="64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db" exitCode=0 Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.702262 5119 util.go:48] "No ready sandbox for pod can be found. 
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.702262 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.702935 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.702997 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.703015 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.703032 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nwvqg" event={"ID":"7337c888-01aa-4a6b-b494-7a51eff39634","Type":"ContainerDied","Data":"f23c029375f63319b7e9208cc19fd212e1bd325e2d7139da03db451166d0a71a"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.703058 5119 scope.go:117] "RemoveContainer" containerID="70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.715565 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qxpww_0cf99dcb-47cd-4077-9fb1-e39bf209e431/kube-multus/0.log"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.715777 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qxpww" event={"ID":"0cf99dcb-47cd-4077-9fb1-e39bf209e431","Type":"ContainerStarted","Data":"2cca6ef60d87170fec22e1da1246d4f1fd1a58fd05ad855d93d165e3739d4c02"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.718146 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" event={"ID":"79052a33-2155-4ebf-b96c-2f27a1d76ab6","Type":"ContainerStarted","Data":"1b7b8ea865bcbf71db45337304f5df8e240ba590b861df0ac1c2c8d736faa179"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.718189 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" event={"ID":"79052a33-2155-4ebf-b96c-2f27a1d76ab6","Type":"ContainerStarted","Data":"9384f5efa757d61d5684042e2e128cfab2f1dba899f2c1d71688bcc25f390d0b"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.718204 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" event={"ID":"79052a33-2155-4ebf-b96c-2f27a1d76ab6","Type":"ContainerStarted","Data":"58a0caf0e1ea50c8d77904271d876d2966bd22ed373dc76b4ac10bcdefd65fb8"}
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.733836 5119 scope.go:117] "RemoveContainer" containerID="3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.760551 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-6x5gp" podStartSLOduration=1.760520782 podStartE2EDuration="1.760520782s" podCreationTimestamp="2026-01-30 00:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:21:17.75141541 +0000 UTC m=+681.765477889" watchObservedRunningTime="2026-01-30 00:21:17.760520782 +0000 UTC m=+681.774583251"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.766682 5119 scope.go:117] "RemoveContainer" containerID="a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.802417 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nwvqg"]
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.806862 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nwvqg"]
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.819353 5119 scope.go:117] "RemoveContainer" containerID="64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.844054 5119 scope.go:117] "RemoveContainer" containerID="c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.860745 5119 scope.go:117] "RemoveContainer" containerID="b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.878866 5119 scope.go:117] "RemoveContainer" containerID="a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.894076 5119 scope.go:117] "RemoveContainer" containerID="eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.919234 5119 scope.go:117] "RemoveContainer" containerID="d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.941484 5119 scope.go:117] "RemoveContainer" containerID="70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.942186 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": container with ID starting with 70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb not found: ID does not exist" containerID="70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.942217 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"} err="failed to get container status \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": rpc error: code = NotFound desc = could not find container \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": container with ID starting with 70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.942238 5119 scope.go:117] "RemoveContainer" containerID="3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.942633 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": container with ID starting with 3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d not found: ID does not exist" containerID="3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.942652 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"} err="failed to get container status \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": rpc error: code = NotFound desc = could not find container \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": container with ID starting with 3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.942663 5119 scope.go:117] "RemoveContainer" containerID="a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.943038 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": container with ID starting with a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f not found: ID does not exist" containerID="a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.943056 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"} err="failed to get container status \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": rpc error: code = NotFound desc = could not find container \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": container with ID starting with a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.943068 5119 scope.go:117] "RemoveContainer" containerID="64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.943298 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": container with ID starting with 64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db not found: ID does not exist" containerID="64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.943314 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"} err="failed to get container status \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": rpc error: code = NotFound desc = could not find container \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": container with ID starting with 64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.943327 5119 scope.go:117] "RemoveContainer" containerID="c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.943628 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": container with ID starting with c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7 not found: ID does not exist" containerID="c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.943646 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"} err="failed to get container status \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": rpc error: code = NotFound desc = could not find container \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": container with ID starting with c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.943658 5119 scope.go:117] "RemoveContainer" containerID="b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.943992 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": container with ID starting with b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68 not found: ID does not exist" containerID="b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.944009 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"} err="failed to get container status \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": rpc error: code = NotFound desc = could not find container \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": container with ID starting with b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.944023 5119 scope.go:117] "RemoveContainer" containerID="a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.944275 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": container with ID starting with a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d not found: ID does not exist" containerID="a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.944289 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"} err="failed to get container status \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": rpc error: code = NotFound desc = could not find container \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": container with ID starting with a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.944299 5119 scope.go:117] "RemoveContainer" containerID="eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.944866 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": container with ID starting with eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4 not found: ID does not exist" containerID="eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.944886 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"} err="failed to get container status \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": rpc error: code = NotFound desc = could not find container \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": container with ID starting with eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.944900 5119 scope.go:117] "RemoveContainer" containerID="d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"
Jan 30 00:21:17 crc kubenswrapper[5119]: E0130 00:21:17.945169 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": container with ID starting with d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb not found: ID does not exist" containerID="d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945185 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"} err="failed to get container status \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": rpc error: code = NotFound desc = could not find container \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": container with ID starting with d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945196 5119 scope.go:117] "RemoveContainer" containerID="70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945484 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"} err="failed to get container status \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": rpc error: code = NotFound desc = could not find container \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": container with ID starting with 70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945499 5119 scope.go:117] "RemoveContainer" containerID="3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945698 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"} err="failed to get container status \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": rpc error: code = NotFound desc = could not find container \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": container with ID starting with 3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945712 5119 scope.go:117] "RemoveContainer" containerID="a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945911 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"} err="failed to get container status \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": rpc error: code = NotFound desc = could not find container \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": container with ID starting with a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.945926 5119 scope.go:117] "RemoveContainer" containerID="64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.946197 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"} err="failed to get container status \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": rpc error: code = NotFound desc = could not find container \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": container with ID starting with 64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.946213 5119 scope.go:117] "RemoveContainer" containerID="c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.946767 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"} err="failed to get container status \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": rpc error: code = NotFound desc = could not find container \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": container with ID starting with c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.946789 5119 scope.go:117] "RemoveContainer" containerID="b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947085 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"} err="failed to get container status \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": rpc error: code = NotFound desc = could not find container \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": container with ID starting with b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947104 5119 scope.go:117] "RemoveContainer" containerID="a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947302 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"} err="failed to get container status \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": rpc error: code = NotFound desc = could not find container \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": container with ID starting with a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947316 5119 scope.go:117] "RemoveContainer" containerID="eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947630 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"} err="failed to get container status \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": rpc error: code = NotFound desc = could not find container \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": container with ID starting with eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947644 5119 scope.go:117] "RemoveContainer" containerID="d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947868 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"} err="failed to get container status \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": rpc error: code = NotFound desc = could not find container \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": container with ID starting with d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.947887 5119 scope.go:117] "RemoveContainer" containerID="70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.948152 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb"} err="failed to get container status \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": rpc error: code = NotFound desc = could not find container \"70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb\": container with ID starting with 70586d2c13054d3efbd35313ab7dfe5316156db57e395a17e75510e9a0bd06fb not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.948166 5119 scope.go:117] "RemoveContainer" containerID="3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.950660 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d"} err="failed to get container status \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": rpc error: code = NotFound desc = could not find container \"3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d\": container with ID starting with 3596e4cf8b2f675032a3c576393916d4598c52f13913806f257871d37fbadc8d not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.950723 5119 scope.go:117] "RemoveContainer" containerID="a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.951082 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f"} err="failed to get container status \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": rpc error: code = NotFound desc = could not find container \"a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f\": container with ID starting with a2e604daf0af2e10ae1fb7b84957b8795c8d2038c4acf3ae116acaf589983b6f not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.951097 5119 scope.go:117] "RemoveContainer" containerID="64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.951369 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db"} err="failed to get container status \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": rpc error: code = NotFound desc = could not find container \"64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db\": container with ID starting with 64ed18a3c33c19bb05c7f00f70f83999b0c964eead1013c3c913c4d8f83407db not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.951403 5119 scope.go:117] "RemoveContainer" containerID="c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.951592 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7"} err="failed to get container status \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": rpc error: code = NotFound desc = could not find container \"c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7\": container with ID starting with c7cb63182247a28ffbe525e9ec937f722926d6197fdd0bc2dfe1ea322494dfc7 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.951611 5119 scope.go:117] "RemoveContainer" containerID="b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.952075 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68"} err="failed to get container status \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": rpc error: code = NotFound desc = could not find container \"b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68\": container with ID starting with b11ba141c432029975bad034b3b5bd5b6b56ed3da4affa3112a9103678d57b68 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.952179 5119 scope.go:117] "RemoveContainer" containerID="a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.953029 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d"} err="failed to get container status \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": rpc error: code = NotFound desc = could not find container \"a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d\": container with ID starting with a099bb32c5be73776101730898b95d69e821e68aa13e83cf424effe46c56967d not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.953053 5119 scope.go:117] "RemoveContainer" containerID="eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.953763 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4"} err="failed to get container status \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": rpc error: code = NotFound desc = could not find container \"eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4\": container with ID starting with eb2e243aa390db8bc6139b7c55a65985ef336749acb892040d3bd74a224654e4 not found: ID does not exist"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.954842 5119 scope.go:117] "RemoveContainer" containerID="d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"
Jan 30 00:21:17 crc kubenswrapper[5119]: I0130 00:21:17.955339 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb"} err="failed to get container status \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": rpc error: code = NotFound desc = could not find container \"d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb\": container with ID starting with d90776256225a0c90ee3bb6d7cbb3dfeb69d2e786fc18c5776b58410260b59cb not found: ID does not exist"
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.731114 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"5ab5f5c7d85bd986acbb0fa4ed63403a0a89c633157640429766b1a978dc7301"}
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.732200 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"4852be89d245fa1e2ffc7afb6d7f4807ccbb262aa9879afaa1ca8f96bd179113"}
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.732502 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"b9b41c2912975ecb62707545fed7ea287d5d61071477ab825ba01cc080739ed0"}
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.732560 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"c5eeb20f918083985bbc0c9711c22bd34b46c0568e9c47b6e2f5bbad34ba97b2"}
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.732611 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"ac9a0b5ecce3b211e3964adc5ff0e15afe9f2b266f8799b85d81bb14e9ca264b"}
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.732686 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"673b072a596bd9526df768d1d55a0b1222b503ac618a78737e70340f9b5fb51b"}
Jan 30 00:21:18 crc kubenswrapper[5119]: I0130 00:21:18.782460 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7337c888-01aa-4a6b-b494-7a51eff39634" path="/var/lib/kubelet/pods/7337c888-01aa-4a6b-b494-7a51eff39634/volumes"
Jan 30 00:21:21 crc kubenswrapper[5119]: I0130 00:21:21.756546 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"1aa51b722b977ca6d9cfd127dbc295ec9e36c9809504e91ca4db5c5f6d2468e6"}
Jan 30 00:21:23 crc kubenswrapper[5119]: I0130 00:21:23.770670 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" event={"ID":"7f0802f1-a806-48cf-819e-27e152f93af2","Type":"ContainerStarted","Data":"2d02a61d6b5e3a7ad89bcecfa04749b881339ed71413ea6d83510a244c10217d"}
Jan 30 00:21:23 crc kubenswrapper[5119]: I0130 00:21:23.770984 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:23 crc kubenswrapper[5119]: I0130 00:21:23.800181 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-k558t" podStartSLOduration=7.8001668760000005 podStartE2EDuration="7.800166876s" podCreationTimestamp="2026-01-30 00:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:21:23.797227474 +0000 UTC m=+687.811289943" watchObservedRunningTime="2026-01-30 00:21:23.800166876 +0000 UTC m=+687.814229335"
Jan 30 00:21:23 crc kubenswrapper[5119]: I0130 00:21:23.803001 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:24 crc kubenswrapper[5119]: I0130 00:21:24.775783 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:24 crc kubenswrapper[5119]: I0130 00:21:24.776113 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:24 crc kubenswrapper[5119]: I0130 00:21:24.799877 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:21:56 crc kubenswrapper[5119]: I0130 00:21:56.809630 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k558t"
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.126561 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495542-m9k4q"]
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.145100 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-m9k4q"]
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.145533 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.148156 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\""
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.148586 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\""
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.148706 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\""
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.176986 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmm9n\" (UniqueName: \"kubernetes.io/projected/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2-kube-api-access-zmm9n\") pod \"auto-csr-approver-29495542-m9k4q\" (UID: \"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2\") " pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.278385 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zmm9n\" (UniqueName: \"kubernetes.io/projected/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2-kube-api-access-zmm9n\") pod \"auto-csr-approver-29495542-m9k4q\" (UID: \"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2\") " pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.297814 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmm9n\" (UniqueName: \"kubernetes.io/projected/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2-kube-api-access-zmm9n\") pod \"auto-csr-approver-29495542-m9k4q\" (UID: \"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2\") " pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.467481 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:00 crc kubenswrapper[5119]: I0130 00:22:00.625191 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-m9k4q"]
Jan 30 00:22:01 crc kubenswrapper[5119]: I0130 00:22:01.019309 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-m9k4q" event={"ID":"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2","Type":"ContainerStarted","Data":"83b09ec2ff0fdc16bb891fe8e5102ba166ffbacdbecb258b2b20de2adae073d4"}
Jan 30 00:22:03 crc kubenswrapper[5119]: I0130 00:22:03.029566 5119 generic.go:358] "Generic (PLEG): container finished" podID="43a2d1a2-e717-4e09-90e7-6200bb5ce8c2" containerID="e4ac56782ed3b44bd9060d377a40455e0d4008492ff6b3496189c932800b1633" exitCode=0
Jan 30 00:22:03 crc kubenswrapper[5119]: I0130 00:22:03.029664 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-m9k4q" event={"ID":"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2","Type":"ContainerDied","Data":"e4ac56782ed3b44bd9060d377a40455e0d4008492ff6b3496189c932800b1633"}
Jan 30 00:22:04 crc kubenswrapper[5119]: I0130 00:22:04.295654 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:04 crc kubenswrapper[5119]: I0130 00:22:04.325233 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmm9n\" (UniqueName: \"kubernetes.io/projected/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2-kube-api-access-zmm9n\") pod \"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2\" (UID: \"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2\") "
Jan 30 00:22:04 crc kubenswrapper[5119]: I0130 00:22:04.331131 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2-kube-api-access-zmm9n" (OuterVolumeSpecName: "kube-api-access-zmm9n") pod "43a2d1a2-e717-4e09-90e7-6200bb5ce8c2" (UID: "43a2d1a2-e717-4e09-90e7-6200bb5ce8c2"). InnerVolumeSpecName "kube-api-access-zmm9n". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:22:04 crc kubenswrapper[5119]: I0130 00:22:04.427025 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zmm9n\" (UniqueName: \"kubernetes.io/projected/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2-kube-api-access-zmm9n\") on node \"crc\" DevicePath \"\""
Jan 30 00:22:05 crc kubenswrapper[5119]: I0130 00:22:05.047317 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-m9k4q"
Jan 30 00:22:05 crc kubenswrapper[5119]: I0130 00:22:05.047358 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-m9k4q" event={"ID":"43a2d1a2-e717-4e09-90e7-6200bb5ce8c2","Type":"ContainerDied","Data":"83b09ec2ff0fdc16bb891fe8e5102ba166ffbacdbecb258b2b20de2adae073d4"}
Jan 30 00:22:05 crc kubenswrapper[5119]: I0130 00:22:05.047423 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83b09ec2ff0fdc16bb891fe8e5102ba166ffbacdbecb258b2b20de2adae073d4"
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.462258 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gdgwt"]
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.462819 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gdgwt" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="registry-server" containerID="cri-o://a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774" gracePeriod=30
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.765078 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.914375 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjhlk\" (UniqueName: \"kubernetes.io/projected/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-kube-api-access-gjhlk\") pod \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") "
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.914532 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-utilities\") pod \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") "
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.914574 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-catalog-content\") pod \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\" (UID: \"8bcf91a5-cf2e-4c76-ba89-d00720fbf424\") "
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.916267 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-utilities" (OuterVolumeSpecName: "utilities") pod "8bcf91a5-cf2e-4c76-ba89-d00720fbf424" (UID: "8bcf91a5-cf2e-4c76-ba89-d00720fbf424"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.920875 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-kube-api-access-gjhlk" (OuterVolumeSpecName: "kube-api-access-gjhlk") pod "8bcf91a5-cf2e-4c76-ba89-d00720fbf424" (UID: "8bcf91a5-cf2e-4c76-ba89-d00720fbf424"). InnerVolumeSpecName "kube-api-access-gjhlk". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:22:19 crc kubenswrapper[5119]: I0130 00:22:19.927562 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8bcf91a5-cf2e-4c76-ba89-d00720fbf424" (UID: "8bcf91a5-cf2e-4c76-ba89-d00720fbf424"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.016634 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.016668 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.016678 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-gjhlk\" (UniqueName: \"kubernetes.io/projected/8bcf91a5-cf2e-4c76-ba89-d00720fbf424-kube-api-access-gjhlk\") on node \"crc\" DevicePath \"\""
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.127610 5119 generic.go:358] "Generic (PLEG): container finished" podID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerID="a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774" exitCode=0
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.127664 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerDied","Data":"a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774"}
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.127707 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gdgwt" event={"ID":"8bcf91a5-cf2e-4c76-ba89-d00720fbf424","Type":"ContainerDied","Data":"c98e68227fd8298b713fc22e15dce5b935c431ad7eda7778b121afa9c9e3538d"}
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.127720 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gdgwt"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.127727 5119 scope.go:117] "RemoveContainer" containerID="a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.152205 5119 scope.go:117] "RemoveContainer" containerID="29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.161790 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gdgwt"]
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.170182 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gdgwt"]
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.184191 5119 scope.go:117] "RemoveContainer" containerID="d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.196429 5119 scope.go:117] "RemoveContainer" containerID="a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774"
Jan 30 00:22:20 crc kubenswrapper[5119]: E0130 00:22:20.196851 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774\": container with ID starting with a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774 not found: ID does not exist" containerID="a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.196890 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774"} err="failed to get container status \"a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774\": rpc error: code = NotFound desc = could not find container \"a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774\": container with ID starting with a7b03d0b7ab526acede5616b2d59445e7b0a4bb1a388d7d90f128a80dea7b774 not found: ID does not exist"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.196915 5119 scope.go:117] "RemoveContainer" containerID="29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60"
Jan 30 00:22:20 crc kubenswrapper[5119]: E0130 00:22:20.197224 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60\": container with ID starting with 29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60 not found: ID does not exist" containerID="29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60"
Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.197248 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60"} err="failed to get container status \"29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60\": rpc error: code = NotFound desc = could not find container \"29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60\": container with ID starting with 29c4509eeebcecc63b330a2ac47f8cfce9b953723b8416eac9e96df31bafac60 not found: ID does not exist"
containerID="d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd" Jan 30 00:22:20 crc kubenswrapper[5119]: E0130 00:22:20.197740 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd\": container with ID starting with d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd not found: ID does not exist" containerID="d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd" Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.197776 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd"} err="failed to get container status \"d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd\": rpc error: code = NotFound desc = could not find container \"d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd\": container with ID starting with d8dbb3b94be51ec8ede5c6ce92b5d0012c2c0bf65ca37fd35b45338f9795cebd not found: ID does not exist" Jan 30 00:22:20 crc kubenswrapper[5119]: I0130 00:22:20.755256 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" path="/var/lib/kubelet/pods/8bcf91a5-cf2e-4c76-ba89-d00720fbf424/volumes" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.971472 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g"] Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.971986 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="extract-content" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.971998 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="extract-content" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972011 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="extract-utilities" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972017 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="extract-utilities" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972026 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="43a2d1a2-e717-4e09-90e7-6200bb5ce8c2" containerName="oc" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972032 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="43a2d1a2-e717-4e09-90e7-6200bb5ce8c2" containerName="oc" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972051 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="registry-server" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972057 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="registry-server" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972149 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="8bcf91a5-cf2e-4c76-ba89-d00720fbf424" containerName="registry-server" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.972162 5119 memory_manager.go:356] "RemoveStaleState removing state" 
podUID="43a2d1a2-e717-4e09-90e7-6200bb5ce8c2" containerName="oc" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.989123 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g"] Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.989350 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:22 crc kubenswrapper[5119]: I0130 00:22:22.991547 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"default-dockercfg-b2ccr\"" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.153688 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vp2d\" (UniqueName: \"kubernetes.io/projected/39602e22-ff75-4157-8420-295fb7d31495-kube-api-access-8vp2d\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.153729 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.153780 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.255246 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8vp2d\" (UniqueName: \"kubernetes.io/projected/39602e22-ff75-4157-8420-295fb7d31495-kube-api-access-8vp2d\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.255292 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.255348 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 
00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.255915 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.255913 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.274054 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vp2d\" (UniqueName: \"kubernetes.io/projected/39602e22-ff75-4157-8420-295fb7d31495-kube-api-access-8vp2d\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.306194 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:23 crc kubenswrapper[5119]: I0130 00:22:23.485280 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g"] Jan 30 00:22:24 crc kubenswrapper[5119]: I0130 00:22:24.148421 5119 generic.go:358] "Generic (PLEG): container finished" podID="39602e22-ff75-4157-8420-295fb7d31495" containerID="85fef74fae874653239aee0142992667d77326316607454326d9407824a479e4" exitCode=0 Jan 30 00:22:24 crc kubenswrapper[5119]: I0130 00:22:24.148548 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" event={"ID":"39602e22-ff75-4157-8420-295fb7d31495","Type":"ContainerDied","Data":"85fef74fae874653239aee0142992667d77326316607454326d9407824a479e4"} Jan 30 00:22:24 crc kubenswrapper[5119]: I0130 00:22:24.148582 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" event={"ID":"39602e22-ff75-4157-8420-295fb7d31495","Type":"ContainerStarted","Data":"81f07f31b31ccefa70e65ee6312157536163e6bcea63cfaa27b0b5ec2e7b306f"} Jan 30 00:22:24 crc kubenswrapper[5119]: I0130 00:22:24.371481 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:22:24 crc kubenswrapper[5119]: I0130 00:22:24.371579 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:22:25 crc 
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.742272 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hnjb2"]
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.751323 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hnjb2"]
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.751552 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.885058 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-utilities\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.885317 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxldq\" (UniqueName: \"kubernetes.io/projected/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-kube-api-access-kxldq\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.885481 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-catalog-content\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.987151 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-catalog-content\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.987244 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-utilities\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.987272 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-kxldq\" (UniqueName: \"kubernetes.io/projected/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-kube-api-access-kxldq\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.987806 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-utilities\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:25 crc kubenswrapper[5119]: I0130 00:22:25.988454 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-catalog-content\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:26 crc kubenswrapper[5119]: I0130 00:22:26.009712 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxldq\" (UniqueName: \"kubernetes.io/projected/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-kube-api-access-kxldq\") pod \"redhat-operators-hnjb2\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:26 crc kubenswrapper[5119]: I0130 00:22:26.075845 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnjb2"
Jan 30 00:22:26 crc kubenswrapper[5119]: I0130 00:22:26.176196 5119 generic.go:358] "Generic (PLEG): container finished" podID="39602e22-ff75-4157-8420-295fb7d31495" containerID="b48be84259128e0cc8a719380eb408458f5458ed805ca7a0305de795d24cecc4" exitCode=0
Jan 30 00:22:26 crc kubenswrapper[5119]: I0130 00:22:26.176305 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" event={"ID":"39602e22-ff75-4157-8420-295fb7d31495","Type":"ContainerDied","Data":"b48be84259128e0cc8a719380eb408458f5458ed805ca7a0305de795d24cecc4"}
Jan 30 00:22:26 crc kubenswrapper[5119]: I0130 00:22:26.273006 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hnjb2"]
Jan 30 00:22:27 crc kubenswrapper[5119]: I0130 00:22:27.184260 5119 generic.go:358] "Generic (PLEG): container finished" podID="39602e22-ff75-4157-8420-295fb7d31495" containerID="68420caef4d22f093100662e29449749294c26f66060a60f829fe80eb21abb49" exitCode=0
Jan 30 00:22:27 crc kubenswrapper[5119]: I0130 00:22:27.184344 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" event={"ID":"39602e22-ff75-4157-8420-295fb7d31495","Type":"ContainerDied","Data":"68420caef4d22f093100662e29449749294c26f66060a60f829fe80eb21abb49"}
Jan 30 00:22:27 crc kubenswrapper[5119]: I0130 00:22:27.186898 5119 generic.go:358] "Generic (PLEG): container finished" podID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerID="f79c2da8e3a82c52f5371b5a55c43fc9c24e4ed3e5f6525503c4a774354f23a1" exitCode=0
Jan 30 00:22:27 crc kubenswrapper[5119]: I0130 00:22:27.187094 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerDied","Data":"f79c2da8e3a82c52f5371b5a55c43fc9c24e4ed3e5f6525503c4a774354f23a1"}
Jan 30 00:22:27 crc kubenswrapper[5119]: I0130 00:22:27.187223 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerStarted","Data":"7c55963b62cbb4177fd7d66d737739bebb8c1455ecc9ba5b2b1a3eef43c746f7"}
Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.194186 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerStarted","Data":"ce217bd6ee595f960054569d537f355c702233b0becd8b33b259e772f7d3f8d9"}
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.618912 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-util\") pod \"39602e22-ff75-4157-8420-295fb7d31495\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.618954 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vp2d\" (UniqueName: \"kubernetes.io/projected/39602e22-ff75-4157-8420-295fb7d31495-kube-api-access-8vp2d\") pod \"39602e22-ff75-4157-8420-295fb7d31495\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.618986 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-bundle\") pod \"39602e22-ff75-4157-8420-295fb7d31495\" (UID: \"39602e22-ff75-4157-8420-295fb7d31495\") " Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.621403 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-bundle" (OuterVolumeSpecName: "bundle") pod "39602e22-ff75-4157-8420-295fb7d31495" (UID: "39602e22-ff75-4157-8420-295fb7d31495"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.624332 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39602e22-ff75-4157-8420-295fb7d31495-kube-api-access-8vp2d" (OuterVolumeSpecName: "kube-api-access-8vp2d") pod "39602e22-ff75-4157-8420-295fb7d31495" (UID: "39602e22-ff75-4157-8420-295fb7d31495"). InnerVolumeSpecName "kube-api-access-8vp2d". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.640736 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-util" (OuterVolumeSpecName: "util") pod "39602e22-ff75-4157-8420-295fb7d31495" (UID: "39602e22-ff75-4157-8420-295fb7d31495"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.720288 5119 reconciler_common.go:299] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.720321 5119 reconciler_common.go:299] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/39602e22-ff75-4157-8420-295fb7d31495-util\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:28 crc kubenswrapper[5119]: I0130 00:22:28.720353 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8vp2d\" (UniqueName: \"kubernetes.io/projected/39602e22-ff75-4157-8420-295fb7d31495-kube-api-access-8vp2d\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:29 crc kubenswrapper[5119]: I0130 00:22:29.200022 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" event={"ID":"39602e22-ff75-4157-8420-295fb7d31495","Type":"ContainerDied","Data":"81f07f31b31ccefa70e65ee6312157536163e6bcea63cfaa27b0b5ec2e7b306f"} Jan 30 00:22:29 crc kubenswrapper[5119]: I0130 00:22:29.200056 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g" Jan 30 00:22:29 crc kubenswrapper[5119]: I0130 00:22:29.200066 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81f07f31b31ccefa70e65ee6312157536163e6bcea63cfaa27b0b5ec2e7b306f" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.206295 5119 generic.go:358] "Generic (PLEG): container finished" podID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerID="ce217bd6ee595f960054569d537f355c702233b0becd8b33b259e772f7d3f8d9" exitCode=0 Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.206376 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerDied","Data":"ce217bd6ee595f960054569d537f355c702233b0becd8b33b259e772f7d3f8d9"} Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.573057 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6"] Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574100 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="pull" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574217 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="pull" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574303 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="extract" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574367 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="extract" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574467 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="util" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574531 5119 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="util" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.574709 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="39602e22-ff75-4157-8420-295fb7d31495" containerName="extract" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.585575 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6"] Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.585773 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.590242 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"default-dockercfg-b2ccr\"" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.640275 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z96jn\" (UniqueName: \"kubernetes.io/projected/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-kube-api-access-z96jn\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.640336 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.640356 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.741302 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-z96jn\" (UniqueName: \"kubernetes.io/projected/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-kube-api-access-z96jn\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.741377 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.741461 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-util\") pod 
\"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.741964 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.742049 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.767494 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-z96jn\" (UniqueName: \"kubernetes.io/projected/4e1cb387-b40d-4ab0-867c-b468c70b7ae8-kube-api-access-z96jn\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6\" (UID: \"4e1cb387-b40d-4ab0-867c-b468c70b7ae8\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:30 crc kubenswrapper[5119]: I0130 00:22:30.903015 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" Jan 30 00:22:31 crc kubenswrapper[5119]: I0130 00:22:31.151605 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6"] Jan 30 00:22:31 crc kubenswrapper[5119]: W0130 00:22:31.156935 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e1cb387_b40d_4ab0_867c_b468c70b7ae8.slice/crio-2cf78efce3a992e34c2a83ef86a4b57d41ea56bd3dd50b3fe9713279b9508edc WatchSource:0}: Error finding container 2cf78efce3a992e34c2a83ef86a4b57d41ea56bd3dd50b3fe9713279b9508edc: Status 404 returned error can't find the container with id 2cf78efce3a992e34c2a83ef86a4b57d41ea56bd3dd50b3fe9713279b9508edc Jan 30 00:22:31 crc kubenswrapper[5119]: I0130 00:22:31.212234 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" event={"ID":"4e1cb387-b40d-4ab0-867c-b468c70b7ae8","Type":"ContainerStarted","Data":"2cf78efce3a992e34c2a83ef86a4b57d41ea56bd3dd50b3fe9713279b9508edc"} Jan 30 00:22:31 crc kubenswrapper[5119]: I0130 00:22:31.216518 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerStarted","Data":"e9762b68aef12f1eac477b770c72b3fbd944ad952c32fc222ab73701e492ca83"} Jan 30 00:22:31 crc kubenswrapper[5119]: I0130 00:22:31.239089 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hnjb2" podStartSLOduration=5.529222483 podStartE2EDuration="6.239074301s" podCreationTimestamp="2026-01-30 00:22:25 +0000 UTC" 
firstStartedPulling="2026-01-30 00:22:27.188017936 +0000 UTC m=+751.202080395" lastFinishedPulling="2026-01-30 00:22:27.897869714 +0000 UTC m=+751.911932213" observedRunningTime="2026-01-30 00:22:31.237883821 +0000 UTC m=+755.251946290" watchObservedRunningTime="2026-01-30 00:22:31.239074301 +0000 UTC m=+755.253136760" Jan 30 00:22:32 crc kubenswrapper[5119]: I0130 00:22:32.223340 5119 generic.go:358] "Generic (PLEG): container finished" podID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" containerID="977474b765ba118c0ba254974d9d4e70f0344db197086e672e247df9e056fc44" exitCode=0 Jan 30 00:22:32 crc kubenswrapper[5119]: I0130 00:22:32.223445 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" event={"ID":"4e1cb387-b40d-4ab0-867c-b468c70b7ae8","Type":"ContainerDied","Data":"977474b765ba118c0ba254974d9d4e70f0344db197086e672e247df9e056fc44"} Jan 30 00:22:32 crc kubenswrapper[5119]: E0130 00:22:32.274528 5119 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:22:32 crc kubenswrapper[5119]: E0130 00:22:32.274734 5119 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z96jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_openshift-marketplace(4e1cb387-b40d-4ab0-867c-b468c70b7ae8): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:22:32 crc kubenswrapper[5119]: E0130 00:22:32.275996 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:22:33 crc kubenswrapper[5119]: E0130 00:22:33.230156 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.140674 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5wq22"] Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.425979 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5wq22"] Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.426151 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.502131 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-utilities\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.502209 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-catalog-content\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.502368 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4f6g\" (UniqueName: \"kubernetes.io/projected/52e51f9c-8123-47c2-b565-689b537aa06f-kube-api-access-t4f6g\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.603983 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-t4f6g\" (UniqueName: \"kubernetes.io/projected/52e51f9c-8123-47c2-b565-689b537aa06f-kube-api-access-t4f6g\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.604066 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-utilities\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.604104 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-catalog-content\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.604657 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-catalog-content\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.604783 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-utilities\") pod \"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.657886 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4f6g\" (UniqueName: \"kubernetes.io/projected/52e51f9c-8123-47c2-b565-689b537aa06f-kube-api-access-t4f6g\") pod 
\"certified-operators-5wq22\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:35 crc kubenswrapper[5119]: I0130 00:22:35.742033 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.076855 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hnjb2" Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.077222 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-hnjb2" Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.333839 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5wq22"] Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.822294 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw"] Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.825903 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.834101 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw"] Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.920787 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.920852 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7fl2\" (UniqueName: \"kubernetes.io/projected/51444834-61f9-4867-b791-ae8d97bffd67-kube-api-access-s7fl2\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:36 crc kubenswrapper[5119]: I0130 00:22:36.920886 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.022495 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.022578 5119 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.022799 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-s7fl2\" (UniqueName: \"kubernetes.io/projected/51444834-61f9-4867-b791-ae8d97bffd67-kube-api-access-s7fl2\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.023014 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.023244 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.045460 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7fl2\" (UniqueName: \"kubernetes.io/projected/51444834-61f9-4867-b791-ae8d97bffd67-kube-api-access-s7fl2\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.148113 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.240318 5119 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hnjb2" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="registry-server" probeResult="failure" output=< Jan 30 00:22:37 crc kubenswrapper[5119]: timeout: failed to connect service ":50051" within 1s Jan 30 00:22:37 crc kubenswrapper[5119]: > Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.255050 5119 generic.go:358] "Generic (PLEG): container finished" podID="52e51f9c-8123-47c2-b565-689b537aa06f" containerID="6dae75cdd05d8f59f411cad81a62de8286e5ecd5f848aa19e88ba7c696fae251" exitCode=0 Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.255178 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerDied","Data":"6dae75cdd05d8f59f411cad81a62de8286e5ecd5f848aa19e88ba7c696fae251"} Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.255238 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerStarted","Data":"6919390f6d2de09bf54b2c5daf6e179496881128dc9192a00086b4b4318f3c24"} Jan 30 00:22:37 crc kubenswrapper[5119]: I0130 00:22:37.694252 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw"] Jan 30 00:22:38 crc kubenswrapper[5119]: I0130 00:22:38.270433 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerStarted","Data":"0b3742bbf2c91527013ac142650c19fcb4af5c9462b710db54e45b735bf50675"} Jan 30 00:22:38 crc kubenswrapper[5119]: I0130 00:22:38.271982 5119 generic.go:358] "Generic (PLEG): container finished" podID="51444834-61f9-4867-b791-ae8d97bffd67" containerID="dcdcde5de6ed89ff52c16c3d4f0e4f95f4e6b0a0691cd4279b8517da6d355013" exitCode=0 Jan 30 00:22:38 crc kubenswrapper[5119]: I0130 00:22:38.272113 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" event={"ID":"51444834-61f9-4867-b791-ae8d97bffd67","Type":"ContainerDied","Data":"dcdcde5de6ed89ff52c16c3d4f0e4f95f4e6b0a0691cd4279b8517da6d355013"} Jan 30 00:22:38 crc kubenswrapper[5119]: I0130 00:22:38.272152 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" event={"ID":"51444834-61f9-4867-b791-ae8d97bffd67","Type":"ContainerStarted","Data":"2af2a49aa226a23225f1f6eeb3d91e0639886d6cf79af896a8ab6c90ee96006e"} Jan 30 00:22:39 crc kubenswrapper[5119]: I0130 00:22:39.280115 5119 generic.go:358] "Generic (PLEG): container finished" podID="52e51f9c-8123-47c2-b565-689b537aa06f" containerID="0b3742bbf2c91527013ac142650c19fcb4af5c9462b710db54e45b735bf50675" exitCode=0 Jan 30 00:22:39 crc kubenswrapper[5119]: I0130 00:22:39.280166 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerDied","Data":"0b3742bbf2c91527013ac142650c19fcb4af5c9462b710db54e45b735bf50675"} Jan 30 00:22:40 crc 
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.141362 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.253545 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.253616 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.254991 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.257684 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operators\"/\"kube-root-ca.crt\""
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.257839 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"obo-prometheus-operator-dockercfg-nlr7j\""
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.259695 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operators\"/\"openshift-service-ca.crt\""
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.291609 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.291659 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.337336 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.337409 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerStarted","Data":"1e2d4b26a8cbf5505525990096272a9e3e2c22f9a98fdb6fca56447ae33bc885"}
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.337572 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.338571 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.345131 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-dockercfg-kzlrr\""
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.345651 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-service-cert\""
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.373018 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/34241785-7479-467d-a949-303dc1e64d18-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb\" (UID: \"34241785-7479-467d-a949-303dc1e64d18\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.373078 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm8qj\" (UniqueName: \"kubernetes.io/projected/6c2bf0c8-b86b-4a47-ad20-b8478b4188b0-kube-api-access-gm8qj\") pod \"obo-prometheus-operator-9bc85b4bf-9x995\" (UID: \"6c2bf0c8-b86b-4a47-ad20-b8478b4188b0\") " pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.373107 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1c717fb-e705-4548-8aa0-823e4a6ddd8e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc\" (UID: \"c1c717fb-e705-4548-8aa0-823e4a6ddd8e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.373129 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1c717fb-e705-4548-8aa0-823e4a6ddd8e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc\" (UID: \"c1c717fb-e705-4548-8aa0-823e4a6ddd8e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.373222 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/34241785-7479-467d-a949-303dc1e64d18-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb\" (UID: \"34241785-7479-467d-a949-303dc1e64d18\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.453308 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5wq22" podStartSLOduration=4.7356837370000004 podStartE2EDuration="5.453282285s" podCreationTimestamp="2026-01-30 00:22:35 +0000 UTC" firstStartedPulling="2026-01-30 00:22:37.256099463 +0000 UTC m=+761.270161922" lastFinishedPulling="2026-01-30 00:22:37.973698011 +0000 UTC m=+761.987760470" observedRunningTime="2026-01-30 00:22:40.429861873 +0000 UTC m=+764.443924332" watchObservedRunningTime="2026-01-30 00:22:40.453282285 +0000 UTC m=+764.467344754"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.476145 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/34241785-7479-467d-a949-303dc1e64d18-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb\" (UID: \"34241785-7479-467d-a949-303dc1e64d18\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.476206 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/34241785-7479-467d-a949-303dc1e64d18-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb\" (UID: \"34241785-7479-467d-a949-303dc1e64d18\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.476249 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gm8qj\" (UniqueName: \"kubernetes.io/projected/6c2bf0c8-b86b-4a47-ad20-b8478b4188b0-kube-api-access-gm8qj\") pod \"obo-prometheus-operator-9bc85b4bf-9x995\" (UID: \"6c2bf0c8-b86b-4a47-ad20-b8478b4188b0\") " pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.476270 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1c717fb-e705-4548-8aa0-823e4a6ddd8e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc\" (UID: \"c1c717fb-e705-4548-8aa0-823e4a6ddd8e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.476289 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1c717fb-e705-4548-8aa0-823e4a6ddd8e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc\" (UID: \"c1c717fb-e705-4548-8aa0-823e4a6ddd8e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.485203 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/34241785-7479-467d-a949-303dc1e64d18-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb\" (UID: \"34241785-7479-467d-a949-303dc1e64d18\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.489357 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1c717fb-e705-4548-8aa0-823e4a6ddd8e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc\" (UID: \"c1c717fb-e705-4548-8aa0-823e4a6ddd8e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.491082 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1c717fb-e705-4548-8aa0-823e4a6ddd8e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc\" (UID: \"c1c717fb-e705-4548-8aa0-823e4a6ddd8e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.491494 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/34241785-7479-467d-a949-303dc1e64d18-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb\" (UID: \"34241785-7479-467d-a949-303dc1e64d18\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.530914 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm8qj\" (UniqueName: \"kubernetes.io/projected/6c2bf0c8-b86b-4a47-ad20-b8478b4188b0-kube-api-access-gm8qj\") pod \"obo-prometheus-operator-9bc85b4bf-9x995\" (UID: \"6c2bf0c8-b86b-4a47-ad20-b8478b4188b0\") " pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.532355 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-85c68dddb-9gt5v"]
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.571613 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.702900 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"
Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.724560 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"
Need to start a new one" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.798098 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"observability-operator-sa-dockercfg-jrkcn\"" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.798340 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"observability-operator-tls\"" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.823299 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-85c68dddb-9gt5v"] Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.823331 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-669c9f96b5-m42vj"] Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.884265 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/30e86055-71d9-4e6e-8268-d8f123e74fb5-observability-operator-tls\") pod \"observability-operator-85c68dddb-9gt5v\" (UID: \"30e86055-71d9-4e6e-8268-d8f123e74fb5\") " pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.884325 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzt9s\" (UniqueName: \"kubernetes.io/projected/30e86055-71d9-4e6e-8268-d8f123e74fb5-kube-api-access-gzt9s\") pod \"observability-operator-85c68dddb-9gt5v\" (UID: \"30e86055-71d9-4e6e-8268-d8f123e74fb5\") " pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.922604 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-669c9f96b5-m42vj"] Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.922842 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.929432 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"perses-operator-dockercfg-9ppf4\"" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.985264 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8a36cf47-b445-4da0-ad4f-d9f080136a33-openshift-service-ca\") pod \"perses-operator-669c9f96b5-m42vj\" (UID: \"8a36cf47-b445-4da0-ad4f-d9f080136a33\") " pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.985318 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v7q8\" (UniqueName: \"kubernetes.io/projected/8a36cf47-b445-4da0-ad4f-d9f080136a33-kube-api-access-2v7q8\") pod \"perses-operator-669c9f96b5-m42vj\" (UID: \"8a36cf47-b445-4da0-ad4f-d9f080136a33\") " pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.985350 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gzt9s\" (UniqueName: \"kubernetes.io/projected/30e86055-71d9-4e6e-8268-d8f123e74fb5-kube-api-access-gzt9s\") pod \"observability-operator-85c68dddb-9gt5v\" (UID: \"30e86055-71d9-4e6e-8268-d8f123e74fb5\") " pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.985648 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/30e86055-71d9-4e6e-8268-d8f123e74fb5-observability-operator-tls\") pod \"observability-operator-85c68dddb-9gt5v\" (UID: \"30e86055-71d9-4e6e-8268-d8f123e74fb5\") " pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:40 crc kubenswrapper[5119]: I0130 00:22:40.992788 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/30e86055-71d9-4e6e-8268-d8f123e74fb5-observability-operator-tls\") pod \"observability-operator-85c68dddb-9gt5v\" (UID: \"30e86055-71d9-4e6e-8268-d8f123e74fb5\") " pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.006466 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzt9s\" (UniqueName: \"kubernetes.io/projected/30e86055-71d9-4e6e-8268-d8f123e74fb5-kube-api-access-gzt9s\") pod \"observability-operator-85c68dddb-9gt5v\" (UID: \"30e86055-71d9-4e6e-8268-d8f123e74fb5\") " pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.087330 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8a36cf47-b445-4da0-ad4f-d9f080136a33-openshift-service-ca\") pod \"perses-operator-669c9f96b5-m42vj\" (UID: \"8a36cf47-b445-4da0-ad4f-d9f080136a33\") " pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.087406 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2v7q8\" (UniqueName: 
\"kubernetes.io/projected/8a36cf47-b445-4da0-ad4f-d9f080136a33-kube-api-access-2v7q8\") pod \"perses-operator-669c9f96b5-m42vj\" (UID: \"8a36cf47-b445-4da0-ad4f-d9f080136a33\") " pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.088475 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8a36cf47-b445-4da0-ad4f-d9f080136a33-openshift-service-ca\") pod \"perses-operator-669c9f96b5-m42vj\" (UID: \"8a36cf47-b445-4da0-ad4f-d9f080136a33\") " pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.106055 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v7q8\" (UniqueName: \"kubernetes.io/projected/8a36cf47-b445-4da0-ad4f-d9f080136a33-kube-api-access-2v7q8\") pod \"perses-operator-669c9f96b5-m42vj\" (UID: \"8a36cf47-b445-4da0-ad4f-d9f080136a33\") " pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.122784 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.167232 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995"] Jan 30 00:22:41 crc kubenswrapper[5119]: W0130 00:22:41.194514 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c2bf0c8_b86b_4a47_ad20_b8478b4188b0.slice/crio-1d2cbe7ff727dddcd1ec62aac8efae7daa78500674fc24030c0949a0e3020d01 WatchSource:0}: Error finding container 1d2cbe7ff727dddcd1ec62aac8efae7daa78500674fc24030c0949a0e3020d01: Status 404 returned error can't find the container with id 1d2cbe7ff727dddcd1ec62aac8efae7daa78500674fc24030c0949a0e3020d01 Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.248213 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.303792 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995" event={"ID":"6c2bf0c8-b86b-4a47-ad20-b8478b4188b0","Type":"ContainerStarted","Data":"1d2cbe7ff727dddcd1ec62aac8efae7daa78500674fc24030c0949a0e3020d01"} Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.305603 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb"] Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.363184 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc"] Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.465974 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-85c68dddb-9gt5v"] Jan 30 00:22:41 crc kubenswrapper[5119]: I0130 00:22:41.592201 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-669c9f96b5-m42vj"] Jan 30 00:22:42 crc kubenswrapper[5119]: I0130 00:22:42.309424 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc" event={"ID":"c1c717fb-e705-4548-8aa0-823e4a6ddd8e","Type":"ContainerStarted","Data":"c649404f027dfa477cacc951b9f1f08e49ff7683855d35fb342b032cb0a770ec"} Jan 30 00:22:42 crc kubenswrapper[5119]: I0130 00:22:42.310482 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb" event={"ID":"34241785-7479-467d-a949-303dc1e64d18","Type":"ContainerStarted","Data":"ecf8b7f1930595d3cb64de25a1cdc9e9935ec00f83f39f2730808ea9800bd2e2"} Jan 30 00:22:44 crc kubenswrapper[5119]: W0130 00:22:44.128043 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a36cf47_b445_4da0_ad4f_d9f080136a33.slice/crio-9d912f866b797c0ce315f9454a6db6e42967fad535b2c51290f250439ec3cd0e WatchSource:0}: Error finding container 9d912f866b797c0ce315f9454a6db6e42967fad535b2c51290f250439ec3cd0e: Status 404 returned error can't find the container with id 9d912f866b797c0ce315f9454a6db6e42967fad535b2c51290f250439ec3cd0e Jan 30 00:22:44 crc kubenswrapper[5119]: W0130 00:22:44.129680 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30e86055_71d9_4e6e_8268_d8f123e74fb5.slice/crio-9bdd96ef69a97c3ed9eca62234a3a9016ea23546ff9180dedb1972aab3c3329d WatchSource:0}: Error finding container 9bdd96ef69a97c3ed9eca62234a3a9016ea23546ff9180dedb1972aab3c3329d: Status 404 returned error can't find the container with id 9bdd96ef69a97c3ed9eca62234a3a9016ea23546ff9180dedb1972aab3c3329d Jan 30 00:22:44 crc kubenswrapper[5119]: I0130 00:22:44.323566 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" event={"ID":"30e86055-71d9-4e6e-8268-d8f123e74fb5","Type":"ContainerStarted","Data":"9bdd96ef69a97c3ed9eca62234a3a9016ea23546ff9180dedb1972aab3c3329d"} Jan 30 00:22:44 crc kubenswrapper[5119]: I0130 00:22:44.324584 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" 
event={"ID":"8a36cf47-b445-4da0-ad4f-d9f080136a33","Type":"ContainerStarted","Data":"9d912f866b797c0ce315f9454a6db6e42967fad535b2c51290f250439ec3cd0e"} Jan 30 00:22:45 crc kubenswrapper[5119]: I0130 00:22:45.331807 5119 generic.go:358] "Generic (PLEG): container finished" podID="51444834-61f9-4867-b791-ae8d97bffd67" containerID="0513963615b6b52daa913296906cde6128c241738452734bd6955f7658ba6614" exitCode=0 Jan 30 00:22:45 crc kubenswrapper[5119]: I0130 00:22:45.332166 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" event={"ID":"51444834-61f9-4867-b791-ae8d97bffd67","Type":"ContainerDied","Data":"0513963615b6b52daa913296906cde6128c241738452734bd6955f7658ba6614"} Jan 30 00:22:45 crc kubenswrapper[5119]: I0130 00:22:45.742976 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:45 crc kubenswrapper[5119]: I0130 00:22:45.743329 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:45 crc kubenswrapper[5119]: I0130 00:22:45.806993 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:46 crc kubenswrapper[5119]: I0130 00:22:46.143289 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hnjb2" Jan 30 00:22:46 crc kubenswrapper[5119]: I0130 00:22:46.206725 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hnjb2" Jan 30 00:22:46 crc kubenswrapper[5119]: I0130 00:22:46.347640 5119 generic.go:358] "Generic (PLEG): container finished" podID="51444834-61f9-4867-b791-ae8d97bffd67" containerID="a4f01b0edd37379c25017ead0c76bc9cc29f2b7b506e42f841358f26a11f3840" exitCode=0 Jan 30 00:22:46 crc kubenswrapper[5119]: I0130 00:22:46.348732 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" event={"ID":"51444834-61f9-4867-b791-ae8d97bffd67","Type":"ContainerDied","Data":"a4f01b0edd37379c25017ead0c76bc9cc29f2b7b506e42f841358f26a11f3840"} Jan 30 00:22:46 crc kubenswrapper[5119]: I0130 00:22:46.462000 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:49 crc kubenswrapper[5119]: E0130 00:22:49.000710 5119 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:22:49 crc kubenswrapper[5119]: E0130 00:22:49.000985 5119 kuberuntime_manager.go:1358] "Unhandled Error" err="init 
container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z96jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_openshift-marketplace(4e1cb387-b40d-4ab0-867c-b468c70b7ae8): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:22:49 crc kubenswrapper[5119]: E0130 00:22:49.002183 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:22:49 crc kubenswrapper[5119]: I0130 00:22:49.943084 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.012999 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7fl2\" (UniqueName: \"kubernetes.io/projected/51444834-61f9-4867-b791-ae8d97bffd67-kube-api-access-s7fl2\") pod \"51444834-61f9-4867-b791-ae8d97bffd67\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.013148 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-util\") pod \"51444834-61f9-4867-b791-ae8d97bffd67\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.013184 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-bundle\") pod \"51444834-61f9-4867-b791-ae8d97bffd67\" (UID: \"51444834-61f9-4867-b791-ae8d97bffd67\") " Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.014261 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-bundle" (OuterVolumeSpecName: "bundle") pod "51444834-61f9-4867-b791-ae8d97bffd67" (UID: "51444834-61f9-4867-b791-ae8d97bffd67"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.019289 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51444834-61f9-4867-b791-ae8d97bffd67-kube-api-access-s7fl2" (OuterVolumeSpecName: "kube-api-access-s7fl2") pod "51444834-61f9-4867-b791-ae8d97bffd67" (UID: "51444834-61f9-4867-b791-ae8d97bffd67"). InnerVolumeSpecName "kube-api-access-s7fl2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.023270 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-util" (OuterVolumeSpecName: "util") pod "51444834-61f9-4867-b791-ae8d97bffd67" (UID: "51444834-61f9-4867-b791-ae8d97bffd67"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.114478 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-s7fl2\" (UniqueName: \"kubernetes.io/projected/51444834-61f9-4867-b791-ae8d97bffd67-kube-api-access-s7fl2\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.114515 5119 reconciler_common.go:299] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-util\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.114526 5119 reconciler_common.go:299] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51444834-61f9-4867-b791-ae8d97bffd67-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.328271 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5wq22"] Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.328635 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5wq22" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="registry-server" containerID="cri-o://1e2d4b26a8cbf5505525990096272a9e3e2c22f9a98fdb6fca56447ae33bc885" gracePeriod=2 Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.380770 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" event={"ID":"51444834-61f9-4867-b791-ae8d97bffd67","Type":"ContainerDied","Data":"2af2a49aa226a23225f1f6eeb3d91e0639886d6cf79af896a8ab6c90ee96006e"} Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.380809 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2af2a49aa226a23225f1f6eeb3d91e0639886d6cf79af896a8ab6c90ee96006e" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.380897 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw" Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.531403 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hnjb2"] Jan 30 00:22:50 crc kubenswrapper[5119]: I0130 00:22:50.531802 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hnjb2" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="registry-server" containerID="cri-o://e9762b68aef12f1eac477b770c72b3fbd944ad952c32fc222ab73701e492ca83" gracePeriod=2 Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.388315 5119 generic.go:358] "Generic (PLEG): container finished" podID="52e51f9c-8123-47c2-b565-689b537aa06f" containerID="1e2d4b26a8cbf5505525990096272a9e3e2c22f9a98fdb6fca56447ae33bc885" exitCode=0 Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.388582 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerDied","Data":"1e2d4b26a8cbf5505525990096272a9e3e2c22f9a98fdb6fca56447ae33bc885"} Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.390517 5119 generic.go:358] "Generic (PLEG): container finished" podID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerID="e9762b68aef12f1eac477b770c72b3fbd944ad952c32fc222ab73701e492ca83" exitCode=0 Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.390635 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerDied","Data":"e9762b68aef12f1eac477b770c72b3fbd944ad952c32fc222ab73701e492ca83"} Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.562325 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.566284 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hnjb2" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.637338 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxldq\" (UniqueName: \"kubernetes.io/projected/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-kube-api-access-kxldq\") pod \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.637403 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-catalog-content\") pod \"52e51f9c-8123-47c2-b565-689b537aa06f\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.637432 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-catalog-content\") pod \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.637457 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-utilities\") pod \"52e51f9c-8123-47c2-b565-689b537aa06f\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.637508 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-utilities\") pod \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\" (UID: \"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7\") " Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.637617 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4f6g\" (UniqueName: \"kubernetes.io/projected/52e51f9c-8123-47c2-b565-689b537aa06f-kube-api-access-t4f6g\") pod \"52e51f9c-8123-47c2-b565-689b537aa06f\" (UID: \"52e51f9c-8123-47c2-b565-689b537aa06f\") " Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.639825 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-utilities" (OuterVolumeSpecName: "utilities") pod "52e51f9c-8123-47c2-b565-689b537aa06f" (UID: "52e51f9c-8123-47c2-b565-689b537aa06f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.647367 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-utilities" (OuterVolumeSpecName: "utilities") pod "13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" (UID: "13f9e5fa-78c6-4312-a2c5-d32b5186c9b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.648732 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52e51f9c-8123-47c2-b565-689b537aa06f-kube-api-access-t4f6g" (OuterVolumeSpecName: "kube-api-access-t4f6g") pod "52e51f9c-8123-47c2-b565-689b537aa06f" (UID: "52e51f9c-8123-47c2-b565-689b537aa06f"). InnerVolumeSpecName "kube-api-access-t4f6g". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.648813 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-kube-api-access-kxldq" (OuterVolumeSpecName: "kube-api-access-kxldq") pod "13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" (UID: "13f9e5fa-78c6-4312-a2c5-d32b5186c9b7"). InnerVolumeSpecName "kube-api-access-kxldq". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.672593 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52e51f9c-8123-47c2-b565-689b537aa06f" (UID: "52e51f9c-8123-47c2-b565-689b537aa06f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.742693 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.742728 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-t4f6g\" (UniqueName: \"kubernetes.io/projected/52e51f9c-8123-47c2-b565-689b537aa06f-kube-api-access-t4f6g\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.742739 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-kxldq\" (UniqueName: \"kubernetes.io/projected/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-kube-api-access-kxldq\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.742748 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.742756 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e51f9c-8123-47c2-b565-689b537aa06f-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.750482 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" (UID: "13f9e5fa-78c6-4312-a2c5-d32b5186c9b7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:51 crc kubenswrapper[5119]: I0130 00:22:51.844771 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.405990 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5wq22" event={"ID":"52e51f9c-8123-47c2-b565-689b537aa06f","Type":"ContainerDied","Data":"6919390f6d2de09bf54b2c5daf6e179496881128dc9192a00086b4b4318f3c24"} Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.406043 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5wq22" Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.406283 5119 scope.go:117] "RemoveContainer" containerID="1e2d4b26a8cbf5505525990096272a9e3e2c22f9a98fdb6fca56447ae33bc885" Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.429184 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnjb2" event={"ID":"13f9e5fa-78c6-4312-a2c5-d32b5186c9b7","Type":"ContainerDied","Data":"7c55963b62cbb4177fd7d66d737739bebb8c1455ecc9ba5b2b1a3eef43c746f7"} Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.429256 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnjb2" Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.448036 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5wq22"] Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.459715 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5wq22"] Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.468380 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hnjb2"] Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.476123 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hnjb2"] Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.755326 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" path="/var/lib/kubelet/pods/13f9e5fa-78c6-4312-a2c5-d32b5186c9b7/volumes" Jan 30 00:22:52 crc kubenswrapper[5119]: I0130 00:22:52.756109 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" path="/var/lib/kubelet/pods/52e51f9c-8123-47c2-b565-689b537aa06f/volumes" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.586292 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q"] Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587408 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="pull" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587488 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="pull" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587558 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="registry-server" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587612 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="registry-server" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587671 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="util" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587722 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="util" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587782 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" 
containerName="extract-utilities" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587833 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="extract-utilities" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587890 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="extract-content" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.587940 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="extract-content" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588007 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="extract-content" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588059 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="extract-content" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588118 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="extract-utilities" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588171 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="extract-utilities" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588226 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="extract" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588276 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="extract" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588343 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="registry-server" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588469 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="registry-server" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588628 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="51444834-61f9-4867-b791-ae8d97bffd67" containerName="extract" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588690 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="52e51f9c-8123-47c2-b565-689b537aa06f" containerName="registry-server" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.588747 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="13f9e5fa-78c6-4312-a2c5-d32b5186c9b7" containerName="registry-server" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.888857 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q"] Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.888965 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.891386 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager-operator\"/\"cert-manager-operator-controller-manager-dockercfg-wh42q\"" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.891932 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.892111 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.972915 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/9d611a14-2f32-40ad-87d6-fc2d6cd916fa-tmp\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-7k54q\" (UID: \"9d611a14-2f32-40ad-87d6-fc2d6cd916fa\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:53 crc kubenswrapper[5119]: I0130 00:22:53.973016 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q55vv\" (UniqueName: \"kubernetes.io/projected/9d611a14-2f32-40ad-87d6-fc2d6cd916fa-kube-api-access-q55vv\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-7k54q\" (UID: \"9d611a14-2f32-40ad-87d6-fc2d6cd916fa\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.074744 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/9d611a14-2f32-40ad-87d6-fc2d6cd916fa-tmp\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-7k54q\" (UID: \"9d611a14-2f32-40ad-87d6-fc2d6cd916fa\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.075238 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-q55vv\" (UniqueName: \"kubernetes.io/projected/9d611a14-2f32-40ad-87d6-fc2d6cd916fa-kube-api-access-q55vv\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-7k54q\" (UID: \"9d611a14-2f32-40ad-87d6-fc2d6cd916fa\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.075465 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/9d611a14-2f32-40ad-87d6-fc2d6cd916fa-tmp\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-7k54q\" (UID: \"9d611a14-2f32-40ad-87d6-fc2d6cd916fa\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.107845 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-q55vv\" (UniqueName: \"kubernetes.io/projected/9d611a14-2f32-40ad-87d6-fc2d6cd916fa-kube-api-access-q55vv\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-7k54q\" (UID: \"9d611a14-2f32-40ad-87d6-fc2d6cd916fa\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.211448 5119 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.371063 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:22:54 crc kubenswrapper[5119]: I0130 00:22:54.371410 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:22:56 crc kubenswrapper[5119]: I0130 00:22:56.709917 5119 scope.go:117] "RemoveContainer" containerID="0b3742bbf2c91527013ac142650c19fcb4af5c9462b710db54e45b735bf50675" Jan 30 00:22:56 crc kubenswrapper[5119]: I0130 00:22:56.780991 5119 scope.go:117] "RemoveContainer" containerID="6dae75cdd05d8f59f411cad81a62de8286e5ecd5f848aa19e88ba7c696fae251" Jan 30 00:22:56 crc kubenswrapper[5119]: I0130 00:22:56.810828 5119 scope.go:117] "RemoveContainer" containerID="e9762b68aef12f1eac477b770c72b3fbd944ad952c32fc222ab73701e492ca83" Jan 30 00:22:56 crc kubenswrapper[5119]: I0130 00:22:56.844955 5119 scope.go:117] "RemoveContainer" containerID="ce217bd6ee595f960054569d537f355c702233b0becd8b33b259e772f7d3f8d9" Jan 30 00:22:56 crc kubenswrapper[5119]: I0130 00:22:56.963588 5119 scope.go:117] "RemoveContainer" containerID="f79c2da8e3a82c52f5371b5a55c43fc9c24e4ed3e5f6525503c4a774354f23a1" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.096576 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q"] Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.490221 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb" event={"ID":"34241785-7479-467d-a949-303dc1e64d18","Type":"ContainerStarted","Data":"bec7b946dd2cc65ccc0d21a923aa181b5f1d848f7a4500bfbb2f988f52024de9"} Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.494278 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" event={"ID":"30e86055-71d9-4e6e-8268-d8f123e74fb5","Type":"ContainerStarted","Data":"3d4727b41ce31d84ab2ba6127e07d61cddb93b5f6117f540bc6db21f89de0486"} Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.494718 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.496264 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" event={"ID":"9d611a14-2f32-40ad-87d6-fc2d6cd916fa","Type":"ContainerStarted","Data":"310c3e62c01d0925e9e12a455aa7a2b7b80efb776b37351673852be5fd219a1e"} Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.498066 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" 
event={"ID":"8a36cf47-b445-4da0-ad4f-d9f080136a33","Type":"ContainerStarted","Data":"b513dc0744124bbef63964f74bb1a93bb34c205f870b11efaddc98efa485c5e5"} Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.498219 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.500219 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995" event={"ID":"6c2bf0c8-b86b-4a47-ad20-b8478b4188b0","Type":"ContainerStarted","Data":"a1dc8753ebfab71e9e789f48ddac669a95d66e5210f19483d40c2bf7e132bd0a"} Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.504122 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc" event={"ID":"c1c717fb-e705-4548-8aa0-823e4a6ddd8e","Type":"ContainerStarted","Data":"7416a3be052206dc8930a51041896ec85841e78c315b30801fbfa9296ba8b7dc"} Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.512894 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.512952 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb" podStartSLOduration=2.073350894 podStartE2EDuration="17.512935911s" podCreationTimestamp="2026-01-30 00:22:40 +0000 UTC" firstStartedPulling="2026-01-30 00:22:41.343870867 +0000 UTC m=+765.357933326" lastFinishedPulling="2026-01-30 00:22:56.783455884 +0000 UTC m=+780.797518343" observedRunningTime="2026-01-30 00:22:57.510750926 +0000 UTC m=+781.524813405" watchObservedRunningTime="2026-01-30 00:22:57.512935911 +0000 UTC m=+781.526998370" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.552948 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-9x995" podStartSLOduration=2.006857246 podStartE2EDuration="17.552929389s" podCreationTimestamp="2026-01-30 00:22:40 +0000 UTC" firstStartedPulling="2026-01-30 00:22:41.212752726 +0000 UTC m=+765.226815175" lastFinishedPulling="2026-01-30 00:22:56.758824849 +0000 UTC m=+780.772887318" observedRunningTime="2026-01-30 00:22:57.549911184 +0000 UTC m=+781.563973643" watchObservedRunningTime="2026-01-30 00:22:57.552929389 +0000 UTC m=+781.566991848" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.582083 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-85c68dddb-9gt5v" podStartSLOduration=4.926991334 podStartE2EDuration="17.582064316s" podCreationTimestamp="2026-01-30 00:22:40 +0000 UTC" firstStartedPulling="2026-01-30 00:22:44.131416048 +0000 UTC m=+768.145478507" lastFinishedPulling="2026-01-30 00:22:56.78648903 +0000 UTC m=+780.800551489" observedRunningTime="2026-01-30 00:22:57.579912683 +0000 UTC m=+781.593975142" watchObservedRunningTime="2026-01-30 00:22:57.582064316 +0000 UTC m=+781.596126775" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.607582 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" podStartSLOduration=4.958788787 podStartE2EDuration="17.607560783s" podCreationTimestamp="2026-01-30 00:22:40 +0000 UTC" firstStartedPulling="2026-01-30 
00:22:44.132099504 +0000 UTC m=+768.146161963" lastFinishedPulling="2026-01-30 00:22:56.78087149 +0000 UTC m=+780.794933959" observedRunningTime="2026-01-30 00:22:57.60463694 +0000 UTC m=+781.618699399" watchObservedRunningTime="2026-01-30 00:22:57.607560783 +0000 UTC m=+781.621623252" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.634768 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc" podStartSLOduration=2.2348763480000002 podStartE2EDuration="17.634752971s" podCreationTimestamp="2026-01-30 00:22:40 +0000 UTC" firstStartedPulling="2026-01-30 00:22:41.392641677 +0000 UTC m=+765.406704136" lastFinishedPulling="2026-01-30 00:22:56.7925183 +0000 UTC m=+780.806580759" observedRunningTime="2026-01-30 00:22:57.633024468 +0000 UTC m=+781.647086927" watchObservedRunningTime="2026-01-30 00:22:57.634752971 +0000 UTC m=+781.648815430" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.876338 5119 scope.go:117] "RemoveContainer" containerID="a4c1afdab4dd88f65a3139334d4d5388dac9f5ade00984b14015178e68190eff" Jan 30 00:22:57 crc kubenswrapper[5119]: I0130 00:22:57.894277 5119 scope.go:117] "RemoveContainer" containerID="6a4b2030dd8d86fcf744bb16ff9535ba4242ebfff88605af5224fd7614b5e888" Jan 30 00:22:59 crc kubenswrapper[5119]: E0130 00:22:59.751043 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:23:01 crc kubenswrapper[5119]: I0130 00:23:01.529744 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" event={"ID":"9d611a14-2f32-40ad-87d6-fc2d6cd916fa","Type":"ContainerStarted","Data":"11e9cb2aa816bbbcb896a7c9dce83feb67ebbdc92876682078610320bf334622"} Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.847241 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-7k54q" podStartSLOduration=9.026605265 podStartE2EDuration="12.847223651s" podCreationTimestamp="2026-01-30 00:22:53 +0000 UTC" firstStartedPulling="2026-01-30 00:22:57.124329392 +0000 UTC m=+781.138391851" lastFinishedPulling="2026-01-30 00:23:00.944947788 +0000 UTC m=+784.959010237" observedRunningTime="2026-01-30 00:23:01.557195609 +0000 UTC m=+785.571258068" watchObservedRunningTime="2026-01-30 00:23:05.847223651 +0000 UTC m=+789.861286110" Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.848242 5119 kubelet.go:2537] "SyncLoop ADD" 
source="api" pods=["cert-manager/cert-manager-webhook-597b96b99b-zq6b9"] Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.853537 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.855640 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.855734 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager\"/\"kube-root-ca.crt\"" Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.856799 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager\"/\"cert-manager-webhook-dockercfg-rz8zz\"" Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.860159 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-597b96b99b-zq6b9"] Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.950804 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2eea2654-6937-43fb-80d0-6972c01bde0f-bound-sa-token\") pod \"cert-manager-webhook-597b96b99b-zq6b9\" (UID: \"2eea2654-6937-43fb-80d0-6972c01bde0f\") " pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:05 crc kubenswrapper[5119]: I0130 00:23:05.951113 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvhxg\" (UniqueName: \"kubernetes.io/projected/2eea2654-6937-43fb-80d0-6972c01bde0f-kube-api-access-lvhxg\") pod \"cert-manager-webhook-597b96b99b-zq6b9\" (UID: \"2eea2654-6937-43fb-80d0-6972c01bde0f\") " pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:06 crc kubenswrapper[5119]: I0130 00:23:06.051867 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2eea2654-6937-43fb-80d0-6972c01bde0f-bound-sa-token\") pod \"cert-manager-webhook-597b96b99b-zq6b9\" (UID: \"2eea2654-6937-43fb-80d0-6972c01bde0f\") " pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:06 crc kubenswrapper[5119]: I0130 00:23:06.051916 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-lvhxg\" (UniqueName: \"kubernetes.io/projected/2eea2654-6937-43fb-80d0-6972c01bde0f-kube-api-access-lvhxg\") pod \"cert-manager-webhook-597b96b99b-zq6b9\" (UID: \"2eea2654-6937-43fb-80d0-6972c01bde0f\") " pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:06 crc kubenswrapper[5119]: I0130 00:23:06.074796 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvhxg\" (UniqueName: \"kubernetes.io/projected/2eea2654-6937-43fb-80d0-6972c01bde0f-kube-api-access-lvhxg\") pod \"cert-manager-webhook-597b96b99b-zq6b9\" (UID: \"2eea2654-6937-43fb-80d0-6972c01bde0f\") " pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:06 crc kubenswrapper[5119]: I0130 00:23:06.077793 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2eea2654-6937-43fb-80d0-6972c01bde0f-bound-sa-token\") pod \"cert-manager-webhook-597b96b99b-zq6b9\" (UID: \"2eea2654-6937-43fb-80d0-6972c01bde0f\") " pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 
00:23:06 crc kubenswrapper[5119]: I0130 00:23:06.170506 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:06 crc kubenswrapper[5119]: I0130 00:23:06.642024 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-597b96b99b-zq6b9"] Jan 30 00:23:07 crc kubenswrapper[5119]: I0130 00:23:07.565213 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" event={"ID":"2eea2654-6937-43fb-80d0-6972c01bde0f","Type":"ContainerStarted","Data":"734206128cf9fcba1e6f99884d81044544dad1b36c243479cbb5dea337c9952e"} Jan 30 00:23:08 crc kubenswrapper[5119]: I0130 00:23:08.515267 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-669c9f96b5-m42vj" Jan 30 00:23:08 crc kubenswrapper[5119]: I0130 00:23:08.591784 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-8966b78d4-4p2l6"] Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.038155 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-8966b78d4-4p2l6"] Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.038563 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.040631 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager\"/\"cert-manager-cainjector-dockercfg-mr5b8\"" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.195198 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0918a586-edb9-43df-aea6-412adcf6ded3-bound-sa-token\") pod \"cert-manager-cainjector-8966b78d4-4p2l6\" (UID: \"0918a586-edb9-43df-aea6-412adcf6ded3\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.195317 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gspql\" (UniqueName: \"kubernetes.io/projected/0918a586-edb9-43df-aea6-412adcf6ded3-kube-api-access-gspql\") pod \"cert-manager-cainjector-8966b78d4-4p2l6\" (UID: \"0918a586-edb9-43df-aea6-412adcf6ded3\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.296935 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0918a586-edb9-43df-aea6-412adcf6ded3-bound-sa-token\") pod \"cert-manager-cainjector-8966b78d4-4p2l6\" (UID: \"0918a586-edb9-43df-aea6-412adcf6ded3\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.297008 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gspql\" (UniqueName: \"kubernetes.io/projected/0918a586-edb9-43df-aea6-412adcf6ded3-kube-api-access-gspql\") pod \"cert-manager-cainjector-8966b78d4-4p2l6\" (UID: \"0918a586-edb9-43df-aea6-412adcf6ded3\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.316701 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/0918a586-edb9-43df-aea6-412adcf6ded3-bound-sa-token\") pod \"cert-manager-cainjector-8966b78d4-4p2l6\" (UID: \"0918a586-edb9-43df-aea6-412adcf6ded3\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.320045 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gspql\" (UniqueName: \"kubernetes.io/projected/0918a586-edb9-43df-aea6-412adcf6ded3-kube-api-access-gspql\") pod \"cert-manager-cainjector-8966b78d4-4p2l6\" (UID: \"0918a586-edb9-43df-aea6-412adcf6ded3\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.361531 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" Jan 30 00:23:09 crc kubenswrapper[5119]: I0130 00:23:09.688824 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-8966b78d4-4p2l6"] Jan 30 00:23:10 crc kubenswrapper[5119]: I0130 00:23:10.594112 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" event={"ID":"0918a586-edb9-43df-aea6-412adcf6ded3","Type":"ContainerStarted","Data":"c42267a1f9067f17b74c80dd4f14ce86970c91150bd5d633f726fdcc31030c91"} Jan 30 00:23:11 crc kubenswrapper[5119]: E0130 00:23:11.783268 5119 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:23:11 crc kubenswrapper[5119]: E0130 00:23:11.783508 5119 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z96jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_openshift-marketplace(4e1cb387-b40d-4ab0-867c-b468c70b7ae8): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:23:11 crc kubenswrapper[5119]: E0130 00:23:11.784773 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:23:13 crc kubenswrapper[5119]: I0130 00:23:13.618085 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" event={"ID":"2eea2654-6937-43fb-80d0-6972c01bde0f","Type":"ContainerStarted","Data":"90ee04bb592aa30021b2b2bbf927bcddbb6fc418ce22027ae7c7afd97b81df3f"} Jan 30 00:23:13 crc kubenswrapper[5119]: I0130 00:23:13.618424 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:13 crc kubenswrapper[5119]: I0130 00:23:13.619889 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" event={"ID":"0918a586-edb9-43df-aea6-412adcf6ded3","Type":"ContainerStarted","Data":"8453047a39f16bad8f7e9a812d4641af5c8094b389fe33f22aeda6effb2782cd"} Jan 30 00:23:13 crc kubenswrapper[5119]: I0130 00:23:13.634059 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" podStartSLOduration=2.6091466370000003 podStartE2EDuration="8.634040398s" podCreationTimestamp="2026-01-30 00:23:05 +0000 UTC" firstStartedPulling="2026-01-30 00:23:06.656664304 +0000 UTC m=+790.670726763" lastFinishedPulling="2026-01-30 00:23:12.681558065 +0000 UTC m=+796.695620524" observedRunningTime="2026-01-30 00:23:13.630737285 +0000 UTC m=+797.644799764" watchObservedRunningTime="2026-01-30 00:23:13.634040398 +0000 UTC m=+797.648102867" Jan 30 00:23:13 crc kubenswrapper[5119]: I0130 00:23:13.645126 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-8966b78d4-4p2l6" podStartSLOduration=2.692243936 podStartE2EDuration="5.645104354s" podCreationTimestamp="2026-01-30 00:23:08 +0000 UTC" firstStartedPulling="2026-01-30 00:23:09.723900707 +0000 UTC m=+793.737963166" lastFinishedPulling="2026-01-30 00:23:12.676761125 +0000 UTC m=+796.690823584" observedRunningTime="2026-01-30 00:23:13.642787576 +0000 UTC m=+797.656850035" watchObservedRunningTime="2026-01-30 00:23:13.645104354 +0000 UTC m=+797.659166813" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.475581 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-759f64656b-r4bs5"] Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.484714 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-759f64656b-r4bs5"] Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.485037 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.487676 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager\"/\"cert-manager-dockercfg-75jwm\"" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.594138 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/54a7d06e-dc08-407c-8aad-bcf8967edc15-bound-sa-token\") pod \"cert-manager-759f64656b-r4bs5\" (UID: \"54a7d06e-dc08-407c-8aad-bcf8967edc15\") " pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.594619 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md259\" (UniqueName: \"kubernetes.io/projected/54a7d06e-dc08-407c-8aad-bcf8967edc15-kube-api-access-md259\") pod \"cert-manager-759f64656b-r4bs5\" (UID: \"54a7d06e-dc08-407c-8aad-bcf8967edc15\") " pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.696180 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/54a7d06e-dc08-407c-8aad-bcf8967edc15-bound-sa-token\") pod \"cert-manager-759f64656b-r4bs5\" (UID: \"54a7d06e-dc08-407c-8aad-bcf8967edc15\") " pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.696537 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-md259\" (UniqueName: \"kubernetes.io/projected/54a7d06e-dc08-407c-8aad-bcf8967edc15-kube-api-access-md259\") pod \"cert-manager-759f64656b-r4bs5\" (UID: \"54a7d06e-dc08-407c-8aad-bcf8967edc15\") " pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.716564 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/54a7d06e-dc08-407c-8aad-bcf8967edc15-bound-sa-token\") pod \"cert-manager-759f64656b-r4bs5\" (UID: \"54a7d06e-dc08-407c-8aad-bcf8967edc15\") " pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.721633 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-md259\" (UniqueName: \"kubernetes.io/projected/54a7d06e-dc08-407c-8aad-bcf8967edc15-kube-api-access-md259\") pod \"cert-manager-759f64656b-r4bs5\" (UID: \"54a7d06e-dc08-407c-8aad-bcf8967edc15\") " pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:16 crc kubenswrapper[5119]: I0130 00:23:16.809616 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-759f64656b-r4bs5" Jan 30 00:23:17 crc kubenswrapper[5119]: W0130 00:23:17.073421 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a7d06e_dc08_407c_8aad_bcf8967edc15.slice/crio-e37920c1e8b5ecf3a313c07dc64169f0c10178975ce3b142447cd4b4b0c861a6 WatchSource:0}: Error finding container e37920c1e8b5ecf3a313c07dc64169f0c10178975ce3b142447cd4b4b0c861a6: Status 404 returned error can't find the container with id e37920c1e8b5ecf3a313c07dc64169f0c10178975ce3b142447cd4b4b0c861a6 Jan 30 00:23:17 crc kubenswrapper[5119]: I0130 00:23:17.074032 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-759f64656b-r4bs5"] Jan 30 00:23:17 crc kubenswrapper[5119]: I0130 00:23:17.646044 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-759f64656b-r4bs5" event={"ID":"54a7d06e-dc08-407c-8aad-bcf8967edc15","Type":"ContainerStarted","Data":"0d11236a53a1c3cf64f9e635b221d699a27bc422ec15c2197220150083d5f2f3"} Jan 30 00:23:17 crc kubenswrapper[5119]: I0130 00:23:17.646079 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-759f64656b-r4bs5" event={"ID":"54a7d06e-dc08-407c-8aad-bcf8967edc15","Type":"ContainerStarted","Data":"e37920c1e8b5ecf3a313c07dc64169f0c10178975ce3b142447cd4b4b0c861a6"} Jan 30 00:23:17 crc kubenswrapper[5119]: I0130 00:23:17.666166 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-759f64656b-r4bs5" podStartSLOduration=1.666139502 podStartE2EDuration="1.666139502s" podCreationTimestamp="2026-01-30 00:23:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:23:17.658507581 +0000 UTC m=+801.672570040" watchObservedRunningTime="2026-01-30 00:23:17.666139502 +0000 UTC m=+801.680202001" Jan 30 00:23:19 crc kubenswrapper[5119]: I0130 00:23:19.628475 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-597b96b99b-zq6b9" Jan 30 00:23:23 crc kubenswrapper[5119]: E0130 00:23:23.751064 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.371251 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.371756 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.371967 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.372919 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"26b3207f3ca251d191e8246615fc9445ba3613afe23d646d0e32410e7bcd59ac"} pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.373082 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" containerID="cri-o://26b3207f3ca251d191e8246615fc9445ba3613afe23d646d0e32410e7bcd59ac" gracePeriod=600 Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.685906 5119 generic.go:358] "Generic (PLEG): container finished" podID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerID="26b3207f3ca251d191e8246615fc9445ba3613afe23d646d0e32410e7bcd59ac" exitCode=0 Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.686018 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerDied","Data":"26b3207f3ca251d191e8246615fc9445ba3613afe23d646d0e32410e7bcd59ac"} Jan 30 00:23:24 crc kubenswrapper[5119]: I0130 00:23:24.686085 5119 scope.go:117] "RemoveContainer" containerID="acb18e93ac4afc8e87c29f0f393415c46b320cde48c74a568ec3f5fc1b3d28d2" Jan 30 00:23:27 crc kubenswrapper[5119]: I0130 00:23:27.705288 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"9ec78e1c0b5b60bf7c2e8eae0e051e92b3b72af9f5c5d94190fc2bc39b3b1a3e"} Jan 30 00:23:35 crc kubenswrapper[5119]: E0130 00:23:35.750831 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial 
tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:23:47 crc kubenswrapper[5119]: E0130 00:23:47.752980 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:23:59 crc kubenswrapper[5119]: E0130 00:23:59.988886 5119 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:23:59 crc kubenswrapper[5119]: E0130 00:23:59.989682 5119 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z96jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_openshift-marketplace(4e1cb387-b40d-4ab0-867c-b468c70b7ae8): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:23:59 crc kubenswrapper[5119]: E0130 00:23:59.991190 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.141923 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495544-b4628"] Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.147471 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.149831 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\"" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.150206 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.150584 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.154027 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-b4628"] Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.185273 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mqzb\" (UniqueName: \"kubernetes.io/projected/5fe9ce46-6464-4a80-82d1-73ac2aab552e-kube-api-access-4mqzb\") pod \"auto-csr-approver-29495544-b4628\" (UID: \"5fe9ce46-6464-4a80-82d1-73ac2aab552e\") " pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.287228 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-4mqzb\" (UniqueName: \"kubernetes.io/projected/5fe9ce46-6464-4a80-82d1-73ac2aab552e-kube-api-access-4mqzb\") pod \"auto-csr-approver-29495544-b4628\" (UID: \"5fe9ce46-6464-4a80-82d1-73ac2aab552e\") " pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.309354 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mqzb\" (UniqueName: \"kubernetes.io/projected/5fe9ce46-6464-4a80-82d1-73ac2aab552e-kube-api-access-4mqzb\") pod \"auto-csr-approver-29495544-b4628\" (UID: \"5fe9ce46-6464-4a80-82d1-73ac2aab552e\") " pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.484592 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.675385 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-b4628"] Jan 30 00:24:00 crc kubenswrapper[5119]: W0130 00:24:00.681896 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fe9ce46_6464_4a80_82d1_73ac2aab552e.slice/crio-e05a48beea60581f6ff3affec8ed16d5b162726b65bd845aaa3930d6f712366d WatchSource:0}: Error finding container e05a48beea60581f6ff3affec8ed16d5b162726b65bd845aaa3930d6f712366d: Status 404 returned error can't find the container with id e05a48beea60581f6ff3affec8ed16d5b162726b65bd845aaa3930d6f712366d Jan 30 00:24:00 crc kubenswrapper[5119]: I0130 00:24:00.897551 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-b4628" event={"ID":"5fe9ce46-6464-4a80-82d1-73ac2aab552e","Type":"ContainerStarted","Data":"e05a48beea60581f6ff3affec8ed16d5b162726b65bd845aaa3930d6f712366d"} Jan 30 00:24:02 crc kubenswrapper[5119]: I0130 00:24:02.908075 5119 generic.go:358] "Generic (PLEG): container finished" podID="5fe9ce46-6464-4a80-82d1-73ac2aab552e" containerID="e69319c6b4a96bc63046bbb8f9088b885f4930ad302cfb25cfbcb910ea413438" exitCode=0 Jan 30 00:24:02 crc kubenswrapper[5119]: I0130 00:24:02.908131 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-b4628" event={"ID":"5fe9ce46-6464-4a80-82d1-73ac2aab552e","Type":"ContainerDied","Data":"e69319c6b4a96bc63046bbb8f9088b885f4930ad302cfb25cfbcb910ea413438"} Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.217044 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.343189 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mqzb\" (UniqueName: \"kubernetes.io/projected/5fe9ce46-6464-4a80-82d1-73ac2aab552e-kube-api-access-4mqzb\") pod \"5fe9ce46-6464-4a80-82d1-73ac2aab552e\" (UID: \"5fe9ce46-6464-4a80-82d1-73ac2aab552e\") " Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.352743 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe9ce46-6464-4a80-82d1-73ac2aab552e-kube-api-access-4mqzb" (OuterVolumeSpecName: "kube-api-access-4mqzb") pod "5fe9ce46-6464-4a80-82d1-73ac2aab552e" (UID: "5fe9ce46-6464-4a80-82d1-73ac2aab552e"). InnerVolumeSpecName "kube-api-access-4mqzb". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.446088 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4mqzb\" (UniqueName: \"kubernetes.io/projected/5fe9ce46-6464-4a80-82d1-73ac2aab552e-kube-api-access-4mqzb\") on node \"crc\" DevicePath \"\"" Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.920749 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-b4628" event={"ID":"5fe9ce46-6464-4a80-82d1-73ac2aab552e","Type":"ContainerDied","Data":"e05a48beea60581f6ff3affec8ed16d5b162726b65bd845aaa3930d6f712366d"} Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.920791 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e05a48beea60581f6ff3affec8ed16d5b162726b65bd845aaa3930d6f712366d" Jan 30 00:24:04 crc kubenswrapper[5119]: I0130 00:24:04.920927 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-b4628" Jan 30 00:24:05 crc kubenswrapper[5119]: I0130 00:24:05.305614 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-4rznx"] Jan 30 00:24:05 crc kubenswrapper[5119]: I0130 00:24:05.310726 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-4rznx"] Jan 30 00:24:06 crc kubenswrapper[5119]: I0130 00:24:06.756468 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1" path="/var/lib/kubelet/pods/dca26ac5-6d5c-4e6f-86b4-bc4548d28bf1/volumes" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.450053 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zmcfl"] Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.451115 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="5fe9ce46-6464-4a80-82d1-73ac2aab552e" containerName="oc" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.451132 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fe9ce46-6464-4a80-82d1-73ac2aab552e" containerName="oc" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.451297 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="5fe9ce46-6464-4a80-82d1-73ac2aab552e" containerName="oc" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.462149 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zmcfl"] Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.462297 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.563614 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-utilities\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.563705 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fhb2\" (UniqueName: \"kubernetes.io/projected/74c3ebf7-9520-4072-ac05-4d811b570d04-kube-api-access-2fhb2\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.563729 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-catalog-content\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.665488 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2fhb2\" (UniqueName: \"kubernetes.io/projected/74c3ebf7-9520-4072-ac05-4d811b570d04-kube-api-access-2fhb2\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.665790 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-catalog-content\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.665867 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-utilities\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.666239 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-catalog-content\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.666266 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-utilities\") pod \"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.684136 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fhb2\" (UniqueName: \"kubernetes.io/projected/74c3ebf7-9520-4072-ac05-4d811b570d04-kube-api-access-2fhb2\") pod 
\"community-operators-zmcfl\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.777765 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:13 crc kubenswrapper[5119]: I0130 00:24:13.978459 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zmcfl"] Jan 30 00:24:14 crc kubenswrapper[5119]: I0130 00:24:14.994620 5119 generic.go:358] "Generic (PLEG): container finished" podID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerID="c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d" exitCode=0 Jan 30 00:24:14 crc kubenswrapper[5119]: I0130 00:24:14.994807 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zmcfl" event={"ID":"74c3ebf7-9520-4072-ac05-4d811b570d04","Type":"ContainerDied","Data":"c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d"} Jan 30 00:24:14 crc kubenswrapper[5119]: I0130 00:24:14.994844 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zmcfl" event={"ID":"74c3ebf7-9520-4072-ac05-4d811b570d04","Type":"ContainerStarted","Data":"514af03bac9e7c95fc1efb562c6ee3d5eddd50427b5eb2f91b10852fdf2a6c24"} Jan 30 00:24:15 crc kubenswrapper[5119]: E0130 00:24:15.752117 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:24:16 crc kubenswrapper[5119]: I0130 00:24:16.002584 5119 generic.go:358] "Generic (PLEG): container finished" podID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerID="c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0" exitCode=0 Jan 30 00:24:16 crc kubenswrapper[5119]: I0130 00:24:16.002627 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zmcfl" event={"ID":"74c3ebf7-9520-4072-ac05-4d811b570d04","Type":"ContainerDied","Data":"c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0"} Jan 30 00:24:17 crc kubenswrapper[5119]: I0130 00:24:17.011715 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zmcfl" event={"ID":"74c3ebf7-9520-4072-ac05-4d811b570d04","Type":"ContainerStarted","Data":"474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e"} Jan 30 00:24:17 crc kubenswrapper[5119]: I0130 00:24:17.033183 5119 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/community-operators-zmcfl" podStartSLOduration=3.418547062 podStartE2EDuration="4.033166421s" podCreationTimestamp="2026-01-30 00:24:13 +0000 UTC" firstStartedPulling="2026-01-30 00:24:14.995669849 +0000 UTC m=+859.009732318" lastFinishedPulling="2026-01-30 00:24:15.610289218 +0000 UTC m=+859.624351677" observedRunningTime="2026-01-30 00:24:17.030284899 +0000 UTC m=+861.044347448" watchObservedRunningTime="2026-01-30 00:24:17.033166421 +0000 UTC m=+861.047228870" Jan 30 00:24:23 crc kubenswrapper[5119]: I0130 00:24:23.778976 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:23 crc kubenswrapper[5119]: I0130 00:24:23.779044 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:23 crc kubenswrapper[5119]: I0130 00:24:23.834868 5119 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:24 crc kubenswrapper[5119]: I0130 00:24:24.105761 5119 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:24 crc kubenswrapper[5119]: I0130 00:24:24.153502 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zmcfl"] Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.075836 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zmcfl" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="registry-server" containerID="cri-o://474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e" gracePeriod=2 Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.528697 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.649696 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fhb2\" (UniqueName: \"kubernetes.io/projected/74c3ebf7-9520-4072-ac05-4d811b570d04-kube-api-access-2fhb2\") pod \"74c3ebf7-9520-4072-ac05-4d811b570d04\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.649829 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-utilities\") pod \"74c3ebf7-9520-4072-ac05-4d811b570d04\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.649882 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-catalog-content\") pod \"74c3ebf7-9520-4072-ac05-4d811b570d04\" (UID: \"74c3ebf7-9520-4072-ac05-4d811b570d04\") " Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.652090 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-utilities" (OuterVolumeSpecName: "utilities") pod "74c3ebf7-9520-4072-ac05-4d811b570d04" (UID: "74c3ebf7-9520-4072-ac05-4d811b570d04"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.664355 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74c3ebf7-9520-4072-ac05-4d811b570d04-kube-api-access-2fhb2" (OuterVolumeSpecName: "kube-api-access-2fhb2") pod "74c3ebf7-9520-4072-ac05-4d811b570d04" (UID: "74c3ebf7-9520-4072-ac05-4d811b570d04"). InnerVolumeSpecName "kube-api-access-2fhb2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.765851 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-2fhb2\" (UniqueName: \"kubernetes.io/projected/74c3ebf7-9520-4072-ac05-4d811b570d04-kube-api-access-2fhb2\") on node \"crc\" DevicePath \"\"" Jan 30 00:24:26 crc kubenswrapper[5119]: I0130 00:24:26.765962 5119 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.083737 5119 generic.go:358] "Generic (PLEG): container finished" podID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerID="474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e" exitCode=0 Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.083818 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zmcfl" event={"ID":"74c3ebf7-9520-4072-ac05-4d811b570d04","Type":"ContainerDied","Data":"474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e"} Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.085443 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zmcfl" event={"ID":"74c3ebf7-9520-4072-ac05-4d811b570d04","Type":"ContainerDied","Data":"514af03bac9e7c95fc1efb562c6ee3d5eddd50427b5eb2f91b10852fdf2a6c24"} Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.085465 5119 scope.go:117] "RemoveContainer" containerID="474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.083884 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zmcfl" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.113596 5119 scope.go:117] "RemoveContainer" containerID="c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.131641 5119 scope.go:117] "RemoveContainer" containerID="c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.166140 5119 scope.go:117] "RemoveContainer" containerID="474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e" Jan 30 00:24:27 crc kubenswrapper[5119]: E0130 00:24:27.166615 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e\": container with ID starting with 474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e not found: ID does not exist" containerID="474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.166674 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e"} err="failed to get container status \"474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e\": rpc error: code = NotFound desc = could not find container \"474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e\": container with ID starting with 474d6e2f4b9377351df1996a968851bac9b8c9cd4a5bde7c93f92b497b15f75e not found: ID does not exist" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.166704 5119 scope.go:117] "RemoveContainer" containerID="c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0" Jan 30 00:24:27 crc kubenswrapper[5119]: E0130 00:24:27.167167 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0\": container with ID starting with c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0 not found: ID does not exist" containerID="c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.167201 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0"} err="failed to get container status \"c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0\": rpc error: code = NotFound desc = could not find container \"c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0\": container with ID starting with c50ec1e454d507654c9560d75b3ac91a004d684930c40defa53f5d427c54b0c0 not found: ID does not exist" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.167225 5119 scope.go:117] "RemoveContainer" containerID="c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d" Jan 30 00:24:27 crc kubenswrapper[5119]: E0130 00:24:27.167684 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d\": container with ID starting with c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d not found: ID does not exist" containerID="c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d" 
Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.167713 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d"} err="failed to get container status \"c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d\": rpc error: code = NotFound desc = could not find container \"c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d\": container with ID starting with c674f3cd247141bfbc6da33d51fabac4b2be1eed9e097626722698d9e1f3ae6d not found: ID does not exist" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.260991 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "74c3ebf7-9520-4072-ac05-4d811b570d04" (UID: "74c3ebf7-9520-4072-ac05-4d811b570d04"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.271350 5119 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74c3ebf7-9520-4072-ac05-4d811b570d04-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.424669 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zmcfl"] Jan 30 00:24:27 crc kubenswrapper[5119]: I0130 00:24:27.433825 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zmcfl"] Jan 30 00:24:28 crc kubenswrapper[5119]: I0130 00:24:28.758214 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" path="/var/lib/kubelet/pods/74c3ebf7-9520-4072-ac05-4d811b570d04/volumes" Jan 30 00:24:30 crc kubenswrapper[5119]: E0130 00:24:30.759849 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:24:42 crc kubenswrapper[5119]: E0130 00:24:42.751951 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source 
docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:24:57 crc kubenswrapper[5119]: I0130 00:24:57.065598 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qxpww_0cf99dcb-47cd-4077-9fb1-e39bf209e431/kube-multus/0.log" Jan 30 00:24:57 crc kubenswrapper[5119]: I0130 00:24:57.065756 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qxpww_0cf99dcb-47cd-4077-9fb1-e39bf209e431/kube-multus/0.log" Jan 30 00:24:57 crc kubenswrapper[5119]: I0130 00:24:57.070619 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log" Jan 30 00:24:57 crc kubenswrapper[5119]: I0130 00:24:57.070653 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log" Jan 30 00:24:57 crc kubenswrapper[5119]: E0130 00:24:57.750845 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:24:57 crc kubenswrapper[5119]: I0130 00:24:57.970576 5119 scope.go:117] "RemoveContainer" containerID="c437b0536f5363caf54d0562026c6425415aeb3e3104961212e0ff2b30744c71" Jan 30 00:25:10 crc kubenswrapper[5119]: E0130 00:25:10.752042 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get 
\\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:25:24 crc kubenswrapper[5119]: I0130 00:25:24.750824 5119 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:25:24 crc kubenswrapper[5119]: E0130 00:25:24.777293 5119 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:25:24 crc kubenswrapper[5119]: E0130 00:25:24.777926 5119 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z96jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_openshift-marketplace(4e1cb387-b40d-4ab0-867c-b468c70b7ae8): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source 
docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:25:24 crc kubenswrapper[5119]: E0130 00:25:24.779339 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:25:36 crc kubenswrapper[5119]: E0130 00:25:36.761111 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.881675 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xhpkr/must-gather-787k4"] Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.882927 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="registry-server" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.882944 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="registry-server" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.882989 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="extract-utilities" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.882997 5119 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="extract-utilities" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.883019 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="extract-content" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.883027 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="extract-content" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.883150 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="74c3ebf7-9520-4072-ac05-4d811b570d04" containerName="registry-server" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.891047 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xhpkr/must-gather-787k4"] Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.891205 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.894139 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-must-gather-xhpkr\"/\"default-dockercfg-88btl\"" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.894228 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-must-gather-xhpkr\"/\"kube-root-ca.crt\"" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.894228 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-must-gather-xhpkr\"/\"openshift-service-ca.crt\"" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.955193 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cebd7683-456a-4d39-9701-c59156900afc-must-gather-output\") pod \"must-gather-787k4\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:46 crc kubenswrapper[5119]: I0130 00:25:46.955274 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkwpp\" (UniqueName: \"kubernetes.io/projected/cebd7683-456a-4d39-9701-c59156900afc-kube-api-access-lkwpp\") pod \"must-gather-787k4\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.056422 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cebd7683-456a-4d39-9701-c59156900afc-must-gather-output\") pod \"must-gather-787k4\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.056515 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-lkwpp\" (UniqueName: \"kubernetes.io/projected/cebd7683-456a-4d39-9701-c59156900afc-kube-api-access-lkwpp\") pod \"must-gather-787k4\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.056972 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: 
\"kubernetes.io/empty-dir/cebd7683-456a-4d39-9701-c59156900afc-must-gather-output\") pod \"must-gather-787k4\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.082360 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkwpp\" (UniqueName: \"kubernetes.io/projected/cebd7683-456a-4d39-9701-c59156900afc-kube-api-access-lkwpp\") pod \"must-gather-787k4\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.214572 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.441093 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xhpkr/must-gather-787k4"] Jan 30 00:25:47 crc kubenswrapper[5119]: I0130 00:25:47.596884 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xhpkr/must-gather-787k4" event={"ID":"cebd7683-456a-4d39-9701-c59156900afc","Type":"ContainerStarted","Data":"3fa7db89af553a13d22a2c89b308ae8ed13b281a5d561a27005bcbbf372cd63d"} Jan 30 00:25:49 crc kubenswrapper[5119]: E0130 00:25:49.751853 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:25:53 crc kubenswrapper[5119]: I0130 00:25:53.641958 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xhpkr/must-gather-787k4" event={"ID":"cebd7683-456a-4d39-9701-c59156900afc","Type":"ContainerStarted","Data":"3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39"} Jan 30 00:25:54 crc kubenswrapper[5119]: I0130 00:25:54.371640 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:25:54 crc kubenswrapper[5119]: I0130 00:25:54.371733 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:25:54 crc kubenswrapper[5119]: I0130 00:25:54.650067 5119 
kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xhpkr/must-gather-787k4" event={"ID":"cebd7683-456a-4d39-9701-c59156900afc","Type":"ContainerStarted","Data":"bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1"} Jan 30 00:25:54 crc kubenswrapper[5119]: I0130 00:25:54.702910 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-xhpkr/must-gather-787k4" podStartSLOduration=2.754421287 podStartE2EDuration="8.702890288s" podCreationTimestamp="2026-01-30 00:25:46 +0000 UTC" firstStartedPulling="2026-01-30 00:25:47.461094073 +0000 UTC m=+951.475156532" lastFinishedPulling="2026-01-30 00:25:53.409563074 +0000 UTC m=+957.423625533" observedRunningTime="2026-01-30 00:25:54.70172783 +0000 UTC m=+958.715790299" watchObservedRunningTime="2026-01-30 00:25:54.702890288 +0000 UTC m=+958.716952747" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.132173 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495546-ln289"] Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.196471 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495546-ln289"] Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.196616 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.199439 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.199576 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\"" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.199517 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.366011 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqv69\" (UniqueName: \"kubernetes.io/projected/bddb3979-a7ee-4c6b-89c8-d33a171a67c8-kube-api-access-fqv69\") pod \"auto-csr-approver-29495546-ln289\" (UID: \"bddb3979-a7ee-4c6b-89c8-d33a171a67c8\") " pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.467070 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-fqv69\" (UniqueName: \"kubernetes.io/projected/bddb3979-a7ee-4c6b-89c8-d33a171a67c8-kube-api-access-fqv69\") pod \"auto-csr-approver-29495546-ln289\" (UID: \"bddb3979-a7ee-4c6b-89c8-d33a171a67c8\") " pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.485215 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqv69\" (UniqueName: \"kubernetes.io/projected/bddb3979-a7ee-4c6b-89c8-d33a171a67c8-kube-api-access-fqv69\") pod \"auto-csr-approver-29495546-ln289\" (UID: \"bddb3979-a7ee-4c6b-89c8-d33a171a67c8\") " pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.516298 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:00 crc kubenswrapper[5119]: I0130 00:26:00.952503 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495546-ln289"] Jan 30 00:26:01 crc kubenswrapper[5119]: I0130 00:26:01.697871 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495546-ln289" event={"ID":"bddb3979-a7ee-4c6b-89c8-d33a171a67c8","Type":"ContainerStarted","Data":"c9f3d8ecc7ae12257ecd339d0ea83b98b1a6ac455f0b402216dedbf21205dde2"} Jan 30 00:26:02 crc kubenswrapper[5119]: I0130 00:26:02.705459 5119 generic.go:358] "Generic (PLEG): container finished" podID="bddb3979-a7ee-4c6b-89c8-d33a171a67c8" containerID="fc5d73e45a96df5e0669384b609f96874e3b32af91cffedf25f5fc9f925553bf" exitCode=0 Jan 30 00:26:02 crc kubenswrapper[5119]: I0130 00:26:02.705580 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495546-ln289" event={"ID":"bddb3979-a7ee-4c6b-89c8-d33a171a67c8","Type":"ContainerDied","Data":"fc5d73e45a96df5e0669384b609f96874e3b32af91cffedf25f5fc9f925553bf"} Jan 30 00:26:02 crc kubenswrapper[5119]: E0130 00:26:02.751924 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:26:03 crc kubenswrapper[5119]: I0130 00:26:03.937964 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.005633 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqv69\" (UniqueName: \"kubernetes.io/projected/bddb3979-a7ee-4c6b-89c8-d33a171a67c8-kube-api-access-fqv69\") pod \"bddb3979-a7ee-4c6b-89c8-d33a171a67c8\" (UID: \"bddb3979-a7ee-4c6b-89c8-d33a171a67c8\") " Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.012018 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bddb3979-a7ee-4c6b-89c8-d33a171a67c8-kube-api-access-fqv69" (OuterVolumeSpecName: "kube-api-access-fqv69") pod "bddb3979-a7ee-4c6b-89c8-d33a171a67c8" (UID: "bddb3979-a7ee-4c6b-89c8-d33a171a67c8"). InnerVolumeSpecName "kube-api-access-fqv69". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.106698 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-fqv69\" (UniqueName: \"kubernetes.io/projected/bddb3979-a7ee-4c6b-89c8-d33a171a67c8-kube-api-access-fqv69\") on node \"crc\" DevicePath \"\"" Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.717093 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-ln289" Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.717107 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495546-ln289" event={"ID":"bddb3979-a7ee-4c6b-89c8-d33a171a67c8","Type":"ContainerDied","Data":"c9f3d8ecc7ae12257ecd339d0ea83b98b1a6ac455f0b402216dedbf21205dde2"} Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.717156 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9f3d8ecc7ae12257ecd339d0ea83b98b1a6ac455f0b402216dedbf21205dde2" Jan 30 00:26:04 crc kubenswrapper[5119]: I0130 00:26:04.995001 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-ldthh"] Jan 30 00:26:05 crc kubenswrapper[5119]: I0130 00:26:05.000324 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-ldthh"] Jan 30 00:26:06 crc kubenswrapper[5119]: I0130 00:26:06.755704 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67bc7b41-e975-46b0-acfe-1e4ad46c6679" path="/var/lib/kubelet/pods/67bc7b41-e975-46b0-acfe-1e4ad46c6679/volumes" Jan 30 00:26:16 crc kubenswrapper[5119]: E0130 00:26:16.758143 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:26:24 crc kubenswrapper[5119]: I0130 00:26:24.370796 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:26:24 crc kubenswrapper[5119]: I0130 00:26:24.371367 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 
00:26:27 crc kubenswrapper[5119]: E0130 00:26:27.751117 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:26:30 crc kubenswrapper[5119]: I0130 00:26:30.734352 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-tnqmt_6f3ae473-7d01-46db-ad58-f27062e82346/control-plane-machine-set-operator/0.log" Jan 30 00:26:30 crc kubenswrapper[5119]: I0130 00:26:30.871661 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-sxw6b_682184a8-29d6-4081-99ac-9d5989e169ab/kube-rbac-proxy/0.log" Jan 30 00:26:30 crc kubenswrapper[5119]: I0130 00:26:30.913559 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-sxw6b_682184a8-29d6-4081-99ac-9d5989e169ab/machine-api-operator/0.log" Jan 30 00:26:41 crc kubenswrapper[5119]: I0130 00:26:41.637326 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-759f64656b-r4bs5_54a7d06e-dc08-407c-8aad-bcf8967edc15/cert-manager-controller/0.log" Jan 30 00:26:41 crc kubenswrapper[5119]: I0130 00:26:41.741572 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-8966b78d4-4p2l6_0918a586-edb9-43df-aea6-412adcf6ded3/cert-manager-cainjector/0.log" Jan 30 00:26:41 crc kubenswrapper[5119]: I0130 00:26:41.817380 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-597b96b99b-zq6b9_2eea2654-6937-43fb-80d0-6972c01bde0f/cert-manager-webhook/0.log" Jan 30 00:26:42 crc kubenswrapper[5119]: E0130 00:26:42.751759 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup 
registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.138600 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-9bc85b4bf-9x995_6c2bf0c8-b86b-4a47-ad20-b8478b4188b0/prometheus-operator/0.log" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.256368 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb_34241785-7479-467d-a949-303dc1e64d18/prometheus-operator-admission-webhook/0.log" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.366753 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc_c1c717fb-e705-4548-8aa0-823e4a6ddd8e/prometheus-operator-admission-webhook/0.log" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.370884 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.370968 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.371015 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.371698 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9ec78e1c0b5b60bf7c2e8eae0e051e92b3b72af9f5c5d94190fc2bc39b3b1a3e"} pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.371771 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" containerID="cri-o://9ec78e1c0b5b60bf7c2e8eae0e051e92b3b72af9f5c5d94190fc2bc39b3b1a3e" gracePeriod=600 Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.449878 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-85c68dddb-9gt5v_30e86055-71d9-4e6e-8268-d8f123e74fb5/operator/0.log" Jan 30 00:26:54 crc kubenswrapper[5119]: I0130 00:26:54.570648 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-669c9f96b5-m42vj_8a36cf47-b445-4da0-ad4f-d9f080136a33/perses-operator/0.log" Jan 30 00:26:55 crc kubenswrapper[5119]: I0130 00:26:55.005175 5119 generic.go:358] "Generic (PLEG): container finished" podID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerID="9ec78e1c0b5b60bf7c2e8eae0e051e92b3b72af9f5c5d94190fc2bc39b3b1a3e" exitCode=0 
Jan 30 00:26:55 crc kubenswrapper[5119]: I0130 00:26:55.005217 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerDied","Data":"9ec78e1c0b5b60bf7c2e8eae0e051e92b3b72af9f5c5d94190fc2bc39b3b1a3e"} Jan 30 00:26:55 crc kubenswrapper[5119]: I0130 00:26:55.005721 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"c8193f46715a0a2c916f33ff7a2bc9590d142e992cb56399f41f212e20577e67"} Jan 30 00:26:55 crc kubenswrapper[5119]: I0130 00:26:55.005749 5119 scope.go:117] "RemoveContainer" containerID="26b3207f3ca251d191e8246615fc9445ba3613afe23d646d0e32410e7bcd59ac" Jan 30 00:26:56 crc kubenswrapper[5119]: E0130 00:26:56.755526 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:26:58 crc kubenswrapper[5119]: I0130 00:26:58.084190 5119 scope.go:117] "RemoveContainer" containerID="8ebf90ce66a36b561e345c80cfcbf6257ac7f7696717432e8f4f1aafd604dab9" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.198784 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_4e1cb387-b40d-4ab0-867c-b468c70b7ae8/util/0.log" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.358687 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_4e1cb387-b40d-4ab0-867c-b468c70b7ae8/util/0.log" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.543413 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_4e1cb387-b40d-4ab0-867c-b468c70b7ae8/util/0.log" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.678769 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/util/0.log" Jan 30 00:27:07 crc kubenswrapper[5119]: E0130 00:27:07.751274 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: 
unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.871523 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/util/0.log" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.876882 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/pull/0.log" Jan 30 00:27:07 crc kubenswrapper[5119]: I0130 00:27:07.927893 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/pull/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.079115 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/extract/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.079967 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/util/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.124959 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5mwrtw_51444834-61f9-4867-b791-ae8d97bffd67/pull/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.252841 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/util/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.418863 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/util/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.425999 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/pull/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.456360 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/pull/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.600331 5119 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/pull/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.627309 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/extract/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.651265 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f085tb8g_39602e22-ff75-4157-8420-295fb7d31495/util/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.778883 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/extract-utilities/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.925361 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/extract-content/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.951310 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/extract-utilities/0.log" Jan 30 00:27:08 crc kubenswrapper[5119]: I0130 00:27:08.953029 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/extract-content/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.141896 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/extract-content/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.164055 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/extract-utilities/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.223248 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-85cj4_51c13523-988e-45ad-94d9-effce777308f/registry-server/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.320166 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/extract-utilities/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.498122 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/extract-utilities/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.501135 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/extract-content/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.524313 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/extract-content/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.703877 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/extract-content/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: 
I0130 00:27:09.712409 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/extract-utilities/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.843665 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-547dbd544d-2mvgv_5ca130d6-3d7a-4f6a-8ba5-5f0571f62558/marketplace-operator/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.974528 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/extract-utilities/0.log" Jan 30 00:27:09 crc kubenswrapper[5119]: I0130 00:27:09.989064 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wkk9s_c498e919-938a-4abb-927a-efb274f1c744/registry-server/0.log" Jan 30 00:27:10 crc kubenswrapper[5119]: I0130 00:27:10.114475 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/extract-utilities/0.log" Jan 30 00:27:10 crc kubenswrapper[5119]: I0130 00:27:10.117935 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/extract-content/0.log" Jan 30 00:27:10 crc kubenswrapper[5119]: I0130 00:27:10.128593 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/extract-content/0.log" Jan 30 00:27:10 crc kubenswrapper[5119]: I0130 00:27:10.287104 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/extract-content/0.log" Jan 30 00:27:10 crc kubenswrapper[5119]: I0130 00:27:10.319373 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/extract-utilities/0.log" Jan 30 00:27:10 crc kubenswrapper[5119]: I0130 00:27:10.401277 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hlk9w_67405c86-4048-4098-a3e0-12fec1e771df/registry-server/0.log" Jan 30 00:27:21 crc kubenswrapper[5119]: I0130 00:27:21.225763 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-74c9b6df9b-nkkbb_34241785-7479-467d-a949-303dc1e64d18/prometheus-operator-admission-webhook/0.log" Jan 30 00:27:21 crc kubenswrapper[5119]: I0130 00:27:21.268509 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-9bc85b4bf-9x995_6c2bf0c8-b86b-4a47-ad20-b8478b4188b0/prometheus-operator/0.log" Jan 30 00:27:21 crc kubenswrapper[5119]: I0130 00:27:21.274951 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-74c9b6df9b-pqkzc_c1c717fb-e705-4548-8aa0-823e4a6ddd8e/prometheus-operator-admission-webhook/0.log" Jan 30 00:27:21 crc kubenswrapper[5119]: I0130 00:27:21.390089 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-85c68dddb-9gt5v_30e86055-71d9-4e6e-8268-d8f123e74fb5/operator/0.log" Jan 30 00:27:21 crc kubenswrapper[5119]: I0130 00:27:21.426601 5119 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_perses-operator-669c9f96b5-m42vj_8a36cf47-b445-4da0-ad4f-d9f080136a33/perses-operator/0.log" Jan 30 00:27:21 crc kubenswrapper[5119]: E0130 00:27:21.751492 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:27:32 crc kubenswrapper[5119]: E0130 00:27:32.753989 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:27:43 crc kubenswrapper[5119]: E0130 00:27:43.752876 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:27:58 crc kubenswrapper[5119]: E0130 
00:27:58.760755 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.155842 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495548-cxw4v"] Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.156712 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="bddb3979-a7ee-4c6b-89c8-d33a171a67c8" containerName="oc" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.156729 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="bddb3979-a7ee-4c6b-89c8-d33a171a67c8" containerName="oc" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.156867 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="bddb3979-a7ee-4c6b-89c8-d33a171a67c8" containerName="oc" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.169674 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.173218 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\"" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.173358 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.173216 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.193865 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495548-cxw4v"] Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.248699 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbqh2\" (UniqueName: \"kubernetes.io/projected/f22136b7-7341-4bdf-9606-03ea2a667108-kube-api-access-hbqh2\") pod \"auto-csr-approver-29495548-cxw4v\" (UID: \"f22136b7-7341-4bdf-9606-03ea2a667108\") " pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.350817 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-hbqh2\" (UniqueName: \"kubernetes.io/projected/f22136b7-7341-4bdf-9606-03ea2a667108-kube-api-access-hbqh2\") pod \"auto-csr-approver-29495548-cxw4v\" (UID: \"f22136b7-7341-4bdf-9606-03ea2a667108\") " pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.374976 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbqh2\" (UniqueName: \"kubernetes.io/projected/f22136b7-7341-4bdf-9606-03ea2a667108-kube-api-access-hbqh2\") pod \"auto-csr-approver-29495548-cxw4v\" (UID: \"f22136b7-7341-4bdf-9606-03ea2a667108\") " pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.492811 5119 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:00 crc kubenswrapper[5119]: I0130 00:28:00.762274 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495548-cxw4v"] Jan 30 00:28:01 crc kubenswrapper[5119]: I0130 00:28:01.399316 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" event={"ID":"f22136b7-7341-4bdf-9606-03ea2a667108","Type":"ContainerStarted","Data":"f18bc597b5010af504d1fe3542be10f5a15ccdc926dfe6ce96b035275b58311b"} Jan 30 00:28:02 crc kubenswrapper[5119]: I0130 00:28:02.407034 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" event={"ID":"f22136b7-7341-4bdf-9606-03ea2a667108","Type":"ContainerStarted","Data":"43c1e86b2d3425c7b198e5f9cfe92799a3204493af191f7c71d052409857321a"} Jan 30 00:28:02 crc kubenswrapper[5119]: I0130 00:28:02.423205 5119 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" podStartSLOduration=1.303212874 podStartE2EDuration="2.423186823s" podCreationTimestamp="2026-01-30 00:28:00 +0000 UTC" firstStartedPulling="2026-01-30 00:28:00.781677602 +0000 UTC m=+1084.795740061" lastFinishedPulling="2026-01-30 00:28:01.901651541 +0000 UTC m=+1085.915714010" observedRunningTime="2026-01-30 00:28:02.419242515 +0000 UTC m=+1086.433304974" watchObservedRunningTime="2026-01-30 00:28:02.423186823 +0000 UTC m=+1086.437249282" Jan 30 00:28:03 crc kubenswrapper[5119]: I0130 00:28:03.413461 5119 generic.go:358] "Generic (PLEG): container finished" podID="f22136b7-7341-4bdf-9606-03ea2a667108" containerID="43c1e86b2d3425c7b198e5f9cfe92799a3204493af191f7c71d052409857321a" exitCode=0 Jan 30 00:28:03 crc kubenswrapper[5119]: I0130 00:28:03.413514 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" event={"ID":"f22136b7-7341-4bdf-9606-03ea2a667108","Type":"ContainerDied","Data":"43c1e86b2d3425c7b198e5f9cfe92799a3204493af191f7c71d052409857321a"} Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.421509 5119 generic.go:358] "Generic (PLEG): container finished" podID="cebd7683-456a-4d39-9701-c59156900afc" containerID="3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39" exitCode=0 Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.421607 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xhpkr/must-gather-787k4" event={"ID":"cebd7683-456a-4d39-9701-c59156900afc","Type":"ContainerDied","Data":"3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39"} Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.422649 5119 scope.go:117] "RemoveContainer" containerID="3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39" Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.631313 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xhpkr_must-gather-787k4_cebd7683-456a-4d39-9701-c59156900afc/gather/0.log" Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.679163 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.711878 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbqh2\" (UniqueName: \"kubernetes.io/projected/f22136b7-7341-4bdf-9606-03ea2a667108-kube-api-access-hbqh2\") pod \"f22136b7-7341-4bdf-9606-03ea2a667108\" (UID: \"f22136b7-7341-4bdf-9606-03ea2a667108\") " Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.722013 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f22136b7-7341-4bdf-9606-03ea2a667108-kube-api-access-hbqh2" (OuterVolumeSpecName: "kube-api-access-hbqh2") pod "f22136b7-7341-4bdf-9606-03ea2a667108" (UID: "f22136b7-7341-4bdf-9606-03ea2a667108"). InnerVolumeSpecName "kube-api-access-hbqh2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:28:04 crc kubenswrapper[5119]: I0130 00:28:04.815218 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hbqh2\" (UniqueName: \"kubernetes.io/projected/f22136b7-7341-4bdf-9606-03ea2a667108-kube-api-access-hbqh2\") on node \"crc\" DevicePath \"\"" Jan 30 00:28:05 crc kubenswrapper[5119]: I0130 00:28:05.430800 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" Jan 30 00:28:05 crc kubenswrapper[5119]: I0130 00:28:05.431560 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-cxw4v" event={"ID":"f22136b7-7341-4bdf-9606-03ea2a667108","Type":"ContainerDied","Data":"f18bc597b5010af504d1fe3542be10f5a15ccdc926dfe6ce96b035275b58311b"} Jan 30 00:28:05 crc kubenswrapper[5119]: I0130 00:28:05.431586 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f18bc597b5010af504d1fe3542be10f5a15ccdc926dfe6ce96b035275b58311b" Jan 30 00:28:05 crc kubenswrapper[5119]: I0130 00:28:05.480471 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-m9k4q"] Jan 30 00:28:05 crc kubenswrapper[5119]: I0130 00:28:05.487065 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-m9k4q"] Jan 30 00:28:06 crc kubenswrapper[5119]: I0130 00:28:06.763030 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43a2d1a2-e717-4e09-90e7-6200bb5ce8c2" path="/var/lib/kubelet/pods/43a2d1a2-e717-4e09-90e7-6200bb5ce8c2/volumes" Jan 30 00:28:09 crc kubenswrapper[5119]: E0130 00:28:09.985235 5119 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:28:09 crc kubenswrapper[5119]: E0130 00:28:09.986570 5119 kuberuntime_manager.go:1358] "Unhandled Error" 
err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z96jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6_openshift-marketplace(4e1cb387-b40d-4ab0-867c-b468c70b7ae8): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:28:09 crc kubenswrapper[5119]: E0130 00:28:09.987998 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:28:10 crc kubenswrapper[5119]: I0130 00:28:10.881678 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xhpkr/must-gather-787k4"] Jan 30 00:28:10 crc kubenswrapper[5119]: I0130 00:28:10.881801 5119 kubelet.go:2547] "SyncLoop 
REMOVE" source="api" pods=["openshift-must-gather-xhpkr/must-gather-787k4"] Jan 30 00:28:10 crc kubenswrapper[5119]: I0130 00:28:10.882203 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-must-gather-xhpkr/must-gather-787k4" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="copy" containerID="cri-o://bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1" gracePeriod=2 Jan 30 00:28:10 crc kubenswrapper[5119]: I0130 00:28:10.884977 5119 status_manager.go:895] "Failed to get status for pod" podUID="cebd7683-456a-4d39-9701-c59156900afc" pod="openshift-must-gather-xhpkr/must-gather-787k4" err="pods \"must-gather-787k4\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-must-gather-xhpkr\": no relationship found between node 'crc' and this object" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.293789 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xhpkr_must-gather-787k4_cebd7683-456a-4d39-9701-c59156900afc/copy/0.log" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.294667 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.296436 5119 status_manager.go:895] "Failed to get status for pod" podUID="cebd7683-456a-4d39-9701-c59156900afc" pod="openshift-must-gather-xhpkr/must-gather-787k4" err="pods \"must-gather-787k4\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-must-gather-xhpkr\": no relationship found between node 'crc' and this object" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.412735 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkwpp\" (UniqueName: \"kubernetes.io/projected/cebd7683-456a-4d39-9701-c59156900afc-kube-api-access-lkwpp\") pod \"cebd7683-456a-4d39-9701-c59156900afc\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.412793 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cebd7683-456a-4d39-9701-c59156900afc-must-gather-output\") pod \"cebd7683-456a-4d39-9701-c59156900afc\" (UID: \"cebd7683-456a-4d39-9701-c59156900afc\") " Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.421986 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cebd7683-456a-4d39-9701-c59156900afc-kube-api-access-lkwpp" (OuterVolumeSpecName: "kube-api-access-lkwpp") pod "cebd7683-456a-4d39-9701-c59156900afc" (UID: "cebd7683-456a-4d39-9701-c59156900afc"). InnerVolumeSpecName "kube-api-access-lkwpp". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.460271 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cebd7683-456a-4d39-9701-c59156900afc-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "cebd7683-456a-4d39-9701-c59156900afc" (UID: "cebd7683-456a-4d39-9701-c59156900afc"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.486070 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xhpkr_must-gather-787k4_cebd7683-456a-4d39-9701-c59156900afc/copy/0.log" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.486793 5119 generic.go:358] "Generic (PLEG): container finished" podID="cebd7683-456a-4d39-9701-c59156900afc" containerID="bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1" exitCode=143 Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.487088 5119 scope.go:117] "RemoveContainer" containerID="bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.487257 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xhpkr/must-gather-787k4" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.490036 5119 status_manager.go:895] "Failed to get status for pod" podUID="cebd7683-456a-4d39-9701-c59156900afc" pod="openshift-must-gather-xhpkr/must-gather-787k4" err="pods \"must-gather-787k4\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-must-gather-xhpkr\": no relationship found between node 'crc' and this object" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.506425 5119 status_manager.go:895] "Failed to get status for pod" podUID="cebd7683-456a-4d39-9701-c59156900afc" pod="openshift-must-gather-xhpkr/must-gather-787k4" err="pods \"must-gather-787k4\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-must-gather-xhpkr\": no relationship found between node 'crc' and this object" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.511132 5119 scope.go:117] "RemoveContainer" containerID="3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.513809 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-lkwpp\" (UniqueName: \"kubernetes.io/projected/cebd7683-456a-4d39-9701-c59156900afc-kube-api-access-lkwpp\") on node \"crc\" DevicePath \"\"" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.513827 5119 reconciler_common.go:299] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cebd7683-456a-4d39-9701-c59156900afc-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.576150 5119 scope.go:117] "RemoveContainer" containerID="bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1" Jan 30 00:28:11 crc kubenswrapper[5119]: E0130 00:28:11.576860 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1\": container with ID starting with bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1 not found: ID does not exist" containerID="bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.576901 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1"} err="failed to get container status \"bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1\": rpc error: code = NotFound desc 
= could not find container \"bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1\": container with ID starting with bc8e6c09ed68c9c0b7e97aba7b8e3baca1836a63d40bd3d0909525c16dcdc2c1 not found: ID does not exist" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.576950 5119 scope.go:117] "RemoveContainer" containerID="3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39" Jan 30 00:28:11 crc kubenswrapper[5119]: E0130 00:28:11.577412 5119 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39\": container with ID starting with 3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39 not found: ID does not exist" containerID="3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39" Jan 30 00:28:11 crc kubenswrapper[5119]: I0130 00:28:11.577458 5119 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39"} err="failed to get container status \"3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39\": rpc error: code = NotFound desc = could not find container \"3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39\": container with ID starting with 3a26e338397dab14e640d9916992bd0ae4dec0b30a1aa0ee606b1aad6d340e39 not found: ID does not exist" Jan 30 00:28:12 crc kubenswrapper[5119]: I0130 00:28:12.758762 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cebd7683-456a-4d39-9701-c59156900afc" path="/var/lib/kubelet/pods/cebd7683-456a-4d39-9701-c59156900afc/volumes" Jan 30 00:28:21 crc kubenswrapper[5119]: E0130 00:28:21.751016 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:28:34 crc kubenswrapper[5119]: E0130 00:28:34.751933 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: 
server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:28:47 crc kubenswrapper[5119]: E0130 00:28:47.753179 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:28:54 crc kubenswrapper[5119]: I0130 00:28:54.371065 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:28:54 crc kubenswrapper[5119]: I0130 00:28:54.372448 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:28:58 crc kubenswrapper[5119]: I0130 00:28:58.211331 5119 scope.go:117] "RemoveContainer" containerID="e4ac56782ed3b44bd9060d377a40455e0d4008492ff6b3496189c932800b1633" Jan 30 00:29:01 crc kubenswrapper[5119]: E0130 00:29:01.750702 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" 
podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:29:13 crc kubenswrapper[5119]: E0130 00:29:13.751871 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:29:24 crc kubenswrapper[5119]: I0130 00:29:24.371645 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:29:24 crc kubenswrapper[5119]: I0130 00:29:24.372229 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:29:25 crc kubenswrapper[5119]: E0130 00:29:25.752161 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:29:36 crc kubenswrapper[5119]: E0130 00:29:36.756695 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source 
docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:29:48 crc kubenswrapper[5119]: E0130 00:29:48.752459 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:29:54 crc kubenswrapper[5119]: I0130 00:29:54.370863 5119 patch_prober.go:28] interesting pod/machine-config-daemon-hf5dd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:29:54 crc kubenswrapper[5119]: I0130 00:29:54.371214 5119 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:29:54 crc kubenswrapper[5119]: I0130 00:29:54.371291 5119 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" Jan 30 00:29:54 crc kubenswrapper[5119]: I0130 00:29:54.372194 5119 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c8193f46715a0a2c916f33ff7a2bc9590d142e992cb56399f41f212e20577e67"} pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:29:54 crc kubenswrapper[5119]: I0130 00:29:54.372310 5119 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" podUID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerName="machine-config-daemon" 
containerID="cri-o://c8193f46715a0a2c916f33ff7a2bc9590d142e992cb56399f41f212e20577e67" gracePeriod=600 Jan 30 00:29:55 crc kubenswrapper[5119]: I0130 00:29:55.255173 5119 generic.go:358] "Generic (PLEG): container finished" podID="dff39619-cf4b-4c00-8d99-71c924fcf4c2" containerID="c8193f46715a0a2c916f33ff7a2bc9590d142e992cb56399f41f212e20577e67" exitCode=0 Jan 30 00:29:55 crc kubenswrapper[5119]: I0130 00:29:55.255214 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerDied","Data":"c8193f46715a0a2c916f33ff7a2bc9590d142e992cb56399f41f212e20577e67"} Jan 30 00:29:55 crc kubenswrapper[5119]: I0130 00:29:55.255752 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hf5dd" event={"ID":"dff39619-cf4b-4c00-8d99-71c924fcf4c2","Type":"ContainerStarted","Data":"89bfdbe0313b6753caa95d21a8bdd6d066c8403f07b89266bb4f9b8ed2e1cbcd"} Jan 30 00:29:55 crc kubenswrapper[5119]: I0130 00:29:55.255772 5119 scope.go:117] "RemoveContainer" containerID="9ec78e1c0b5b60bf7c2e8eae0e051e92b3b72af9f5c5d94190fc2bc39b3b1a3e" Jan 30 00:29:57 crc kubenswrapper[5119]: I0130 00:29:57.134535 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qxpww_0cf99dcb-47cd-4077-9fb1-e39bf209e431/kube-multus/0.log" Jan 30 00:29:57 crc kubenswrapper[5119]: I0130 00:29:57.139523 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qxpww_0cf99dcb-47cd-4077-9fb1-e39bf209e431/kube-multus/0.log" Jan 30 00:29:57 crc kubenswrapper[5119]: I0130 00:29:57.141730 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log" Jan 30 00:29:57 crc kubenswrapper[5119]: I0130 00:29:57.144905 5119 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.134541 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5"] Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135472 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="gather" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135489 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="gather" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135499 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="copy" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135505 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="copy" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135524 5119 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f22136b7-7341-4bdf-9606-03ea2a667108" containerName="oc" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135530 5119 state_mem.go:107] "Deleted CPUSet assignment" podUID="f22136b7-7341-4bdf-9606-03ea2a667108" containerName="oc" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 
00:30:00.135659 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="copy" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135668 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="cebd7683-456a-4d39-9701-c59156900afc" containerName="gather" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.135682 5119 memory_manager.go:356] "RemoveStaleState removing state" podUID="f22136b7-7341-4bdf-9606-03ea2a667108" containerName="oc" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.142005 5119 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495550-lprct"] Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.142248 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.145070 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\"" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.145096 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\"" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.146976 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.148991 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495550-lprct"] Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.149781 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.150020 5119 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-2vtgf\"" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.150193 5119 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.155437 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5"] Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.258442 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9526850-9ccb-4819-85f3-ab7fffdb2a66-secret-volume\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.258823 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9526850-9ccb-4819-85f3-ab7fffdb2a66-config-volume\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.258994 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk7q8\" 
(UniqueName: \"kubernetes.io/projected/fec54d77-0a83-4565-8eb4-d1d13e280d68-kube-api-access-xk7q8\") pod \"auto-csr-approver-29495550-lprct\" (UID: \"fec54d77-0a83-4565-8eb4-d1d13e280d68\") " pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.259553 5119 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tchw\" (UniqueName: \"kubernetes.io/projected/c9526850-9ccb-4819-85f3-ab7fffdb2a66-kube-api-access-2tchw\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.360757 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9526850-9ccb-4819-85f3-ab7fffdb2a66-config-volume\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.361048 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xk7q8\" (UniqueName: \"kubernetes.io/projected/fec54d77-0a83-4565-8eb4-d1d13e280d68-kube-api-access-xk7q8\") pod \"auto-csr-approver-29495550-lprct\" (UID: \"fec54d77-0a83-4565-8eb4-d1d13e280d68\") " pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.361156 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2tchw\" (UniqueName: \"kubernetes.io/projected/c9526850-9ccb-4819-85f3-ab7fffdb2a66-kube-api-access-2tchw\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.361376 5119 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9526850-9ccb-4819-85f3-ab7fffdb2a66-secret-volume\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.361735 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9526850-9ccb-4819-85f3-ab7fffdb2a66-config-volume\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.368550 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9526850-9ccb-4819-85f3-ab7fffdb2a66-secret-volume\") pod \"collect-profiles-29495550-xjmz5\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.377881 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tchw\" (UniqueName: \"kubernetes.io/projected/c9526850-9ccb-4819-85f3-ab7fffdb2a66-kube-api-access-2tchw\") pod \"collect-profiles-29495550-xjmz5\" (UID: 
\"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.386737 5119 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk7q8\" (UniqueName: \"kubernetes.io/projected/fec54d77-0a83-4565-8eb4-d1d13e280d68-kube-api-access-xk7q8\") pod \"auto-csr-approver-29495550-lprct\" (UID: \"fec54d77-0a83-4565-8eb4-d1d13e280d68\") " pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.462579 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.483404 5119 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.705740 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495550-lprct"] Jan 30 00:30:00 crc kubenswrapper[5119]: W0130 00:30:00.708963 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfec54d77_0a83_4565_8eb4_d1d13e280d68.slice/crio-a893187b8f6213c1ff5998ebe4f8870bbb814e67a0ee8600a11a6711b2cfaf04 WatchSource:0}: Error finding container a893187b8f6213c1ff5998ebe4f8870bbb814e67a0ee8600a11a6711b2cfaf04: Status 404 returned error can't find the container with id a893187b8f6213c1ff5998ebe4f8870bbb814e67a0ee8600a11a6711b2cfaf04 Jan 30 00:30:00 crc kubenswrapper[5119]: I0130 00:30:00.859469 5119 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5"] Jan 30 00:30:00 crc kubenswrapper[5119]: W0130 00:30:00.866447 5119 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9526850_9ccb_4819_85f3_ab7fffdb2a66.slice/crio-e731384ecc3f255be257dbd6230e6924cebfa0b42b4b6432fdddd01c0894797e WatchSource:0}: Error finding container e731384ecc3f255be257dbd6230e6924cebfa0b42b4b6432fdddd01c0894797e: Status 404 returned error can't find the container with id e731384ecc3f255be257dbd6230e6924cebfa0b42b4b6432fdddd01c0894797e Jan 30 00:30:01 crc kubenswrapper[5119]: I0130 00:30:01.306506 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-lprct" event={"ID":"fec54d77-0a83-4565-8eb4-d1d13e280d68","Type":"ContainerStarted","Data":"a893187b8f6213c1ff5998ebe4f8870bbb814e67a0ee8600a11a6711b2cfaf04"} Jan 30 00:30:01 crc kubenswrapper[5119]: I0130 00:30:01.308181 5119 generic.go:358] "Generic (PLEG): container finished" podID="c9526850-9ccb-4819-85f3-ab7fffdb2a66" containerID="a747eb37bad23f07c6a03aa1ba686c9a8084fbecc4040ad21f79effad7a9d3a3" exitCode=0 Jan 30 00:30:01 crc kubenswrapper[5119]: I0130 00:30:01.308271 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" event={"ID":"c9526850-9ccb-4819-85f3-ab7fffdb2a66","Type":"ContainerDied","Data":"a747eb37bad23f07c6a03aa1ba686c9a8084fbecc4040ad21f79effad7a9d3a3"} Jan 30 00:30:01 crc kubenswrapper[5119]: I0130 00:30:01.308316 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" 
event={"ID":"c9526850-9ccb-4819-85f3-ab7fffdb2a66","Type":"ContainerStarted","Data":"e731384ecc3f255be257dbd6230e6924cebfa0b42b4b6432fdddd01c0894797e"} Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.318484 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-lprct" event={"ID":"fec54d77-0a83-4565-8eb4-d1d13e280d68","Type":"ContainerStarted","Data":"8ef29ec579f49d4d3341378ec5f2595e8675de845a52ee26b5c4e72c01ef2aa0"} Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.556268 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.595602 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9526850-9ccb-4819-85f3-ab7fffdb2a66-config-volume\") pod \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.595737 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tchw\" (UniqueName: \"kubernetes.io/projected/c9526850-9ccb-4819-85f3-ab7fffdb2a66-kube-api-access-2tchw\") pod \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.595766 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9526850-9ccb-4819-85f3-ab7fffdb2a66-secret-volume\") pod \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\" (UID: \"c9526850-9ccb-4819-85f3-ab7fffdb2a66\") " Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.596299 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9526850-9ccb-4819-85f3-ab7fffdb2a66-config-volume" (OuterVolumeSpecName: "config-volume") pod "c9526850-9ccb-4819-85f3-ab7fffdb2a66" (UID: "c9526850-9ccb-4819-85f3-ab7fffdb2a66"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.602857 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9526850-9ccb-4819-85f3-ab7fffdb2a66-kube-api-access-2tchw" (OuterVolumeSpecName: "kube-api-access-2tchw") pod "c9526850-9ccb-4819-85f3-ab7fffdb2a66" (UID: "c9526850-9ccb-4819-85f3-ab7fffdb2a66"). InnerVolumeSpecName "kube-api-access-2tchw". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.603638 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9526850-9ccb-4819-85f3-ab7fffdb2a66-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c9526850-9ccb-4819-85f3-ab7fffdb2a66" (UID: "c9526850-9ccb-4819-85f3-ab7fffdb2a66"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.697512 5119 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9526850-9ccb-4819-85f3-ab7fffdb2a66-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.697568 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-2tchw\" (UniqueName: \"kubernetes.io/projected/c9526850-9ccb-4819-85f3-ab7fffdb2a66-kube-api-access-2tchw\") on node \"crc\" DevicePath \"\"" Jan 30 00:30:02 crc kubenswrapper[5119]: I0130 00:30:02.697580 5119 reconciler_common.go:299] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9526850-9ccb-4819-85f3-ab7fffdb2a66-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:30:02 crc kubenswrapper[5119]: E0130 00:30:02.757255 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.325282 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.325299 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-xjmz5" event={"ID":"c9526850-9ccb-4819-85f3-ab7fffdb2a66","Type":"ContainerDied","Data":"e731384ecc3f255be257dbd6230e6924cebfa0b42b4b6432fdddd01c0894797e"} Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.325731 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e731384ecc3f255be257dbd6230e6924cebfa0b42b4b6432fdddd01c0894797e" Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.326883 5119 generic.go:358] "Generic (PLEG): container finished" podID="fec54d77-0a83-4565-8eb4-d1d13e280d68" containerID="8ef29ec579f49d4d3341378ec5f2595e8675de845a52ee26b5c4e72c01ef2aa0" exitCode=0 Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.326987 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-lprct" event={"ID":"fec54d77-0a83-4565-8eb4-d1d13e280d68","Type":"ContainerDied","Data":"8ef29ec579f49d4d3341378ec5f2595e8675de845a52ee26b5c4e72c01ef2aa0"} Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.525926 5119 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.607298 5119 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xk7q8\" (UniqueName: \"kubernetes.io/projected/fec54d77-0a83-4565-8eb4-d1d13e280d68-kube-api-access-xk7q8\") pod \"fec54d77-0a83-4565-8eb4-d1d13e280d68\" (UID: \"fec54d77-0a83-4565-8eb4-d1d13e280d68\") " Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.612189 5119 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fec54d77-0a83-4565-8eb4-d1d13e280d68-kube-api-access-xk7q8" (OuterVolumeSpecName: "kube-api-access-xk7q8") pod "fec54d77-0a83-4565-8eb4-d1d13e280d68" (UID: "fec54d77-0a83-4565-8eb4-d1d13e280d68"). InnerVolumeSpecName "kube-api-access-xk7q8". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:30:03 crc kubenswrapper[5119]: I0130 00:30:03.708980 5119 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xk7q8\" (UniqueName: \"kubernetes.io/projected/fec54d77-0a83-4565-8eb4-d1d13e280d68-kube-api-access-xk7q8\") on node \"crc\" DevicePath \"\"" Jan 30 00:30:04 crc kubenswrapper[5119]: I0130 00:30:04.334920 5119 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-lprct" Jan 30 00:30:04 crc kubenswrapper[5119]: I0130 00:30:04.334919 5119 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-lprct" event={"ID":"fec54d77-0a83-4565-8eb4-d1d13e280d68","Type":"ContainerDied","Data":"a893187b8f6213c1ff5998ebe4f8870bbb814e67a0ee8600a11a6711b2cfaf04"} Jan 30 00:30:04 crc kubenswrapper[5119]: I0130 00:30:04.335051 5119 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a893187b8f6213c1ff5998ebe4f8870bbb814e67a0ee8600a11a6711b2cfaf04" Jan 30 00:30:04 crc kubenswrapper[5119]: I0130 00:30:04.580430 5119 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-b4628"] Jan 30 00:30:04 crc kubenswrapper[5119]: I0130 00:30:04.584067 5119 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-b4628"] Jan 30 00:30:04 crc kubenswrapper[5119]: I0130 00:30:04.758279 5119 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe9ce46-6464-4a80-82d1-73ac2aab552e" path="/var/lib/kubelet/pods/5fe9ce46-6464-4a80-82d1-73ac2aab552e/volumes" Jan 30 00:30:15 crc kubenswrapper[5119]: E0130 00:30:15.751933 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" 
pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" Jan 30 00:30:28 crc kubenswrapper[5119]: I0130 00:30:28.752477 5119 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:30:28 crc kubenswrapper[5119]: E0130 00:30:28.753210 5119 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e92dh6" podUID="4e1cb387-b40d-4ab0-867c-b468c70b7ae8" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136775661024466 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136775662017404 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136772715016523 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136772715015473 5ustar corecore